Compare commits
52 Commits
| SHA1 |
|---|
| a573f1cd20 |
| f93cf5fa9c |
| b879d62384 |
| c99ffd4394 |
| ed8c07c1c5 |
| cf2b016433 |
| 2899885c42 |
| f3b4838cf6 |
| 8ed30e9960 |
| c7de3919b0 |
| 840066004a |
| 9fb93a3802 |
| ecc7e5ab1e |
| a934c42aa1 |
| 948986ba60 |
| 3be86573aa |
| e3a6355edb |
| 66d16acebc |
| fdff2e0afe |
| 3d9625b40b |
| 547a0d8f32 |
| 876a0a2c2b |
| a37d28e3d7 |
| ddf26e00af |
| e4db163efe |
| 571443c282 |
| aba5e519a4 |
| 5fcba0e814 |
| 928c643d8d |
| fd9f9f6d66 |
| a5b7727c8f |
| 3135c18d95 |
| 1d32a64dc0 |
| d70ca5ee87 |
| eca3a229a4 |
| 4c1eb03671 |
| f463eeed88 |
| 289c9d2343 |
| e375963243 |
| 31baa10b3b |
| 0556c7c56d |
| 83c95d9db8 |
| beccb551e2 |
| c761d98b82 |
| e68d22337b |
| 4cb6f5b6f0 |
| 6d5708800f |
| fa3eb821e6 |
| dd5ed403b9 |
| b4fde22c31 |
| b92e16fa4d |
| 6fbdece4be |
.circleci/config.yml
@@ -2,36 +2,43 @@
 # See: https://circleci.com/docs/2.0/configuration-reference
 version: 2.1

-commands:
-  setup-bazel:
-    description: |
-      Setup the Bazel build system used for building the repo
-    steps:
-      - run:
-          name: Add Bazel Apt repository
-          command: |
-            sudo apt install curl gnupg
-            curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor > bazel.gpg
-            sudo mv bazel.gpg /etc/apt/trusted.gpg.d/
-            echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
-      - run:
-          name: Install Bazel from Apt
-          command: sudo apt update && sudo apt install bazel
-
 # Define a job to be invoked later in a workflow.
 # See: https://circleci.com/docs/2.0/configuration-reference/#jobs
 jobs:
+  lint:
+    working_directory: ~/repo
+    docker:
+      - image: cimg/go:1.22.2
+    steps:
+      - checkout
+      - restore_cache:
+          keys:
+            - go-mod-v4-{{ checksum "go.sum" }}
+      - run:
+          name: Install Dependencies
+          command: go mod download
+      - save_cache:
+          key: go-mod-v4-{{ checksum "go.sum" }}
+          paths:
+            - "/go/pkg/mod"
+      - run:
+          name: Install golangci-lint
+          command: |
+            curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin
+      - run:
+          name: Run golangci-lint
+          command: golangci-lint run --timeout=5m
+
   testbuild:
     working_directory: ~/repo
     # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
     # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
     docker:
-      - image: circleci/golang:1.15.8
+      - image: cimg/go:1.22.2
     # Add steps to the job
     # See: https://circleci.com/docs/2.0/configuration-reference/#steps
     steps:
       - checkout
-      - setup-bazel
       - restore_cache:
           keys:
             - go-mod-v4-{{ checksum "go.sum" }}
@@ -44,16 +51,17 @@ jobs:
             - "/go/pkg/mod"
       - run:
           name: Run tests
-          command: bazel test //...
+          command: go test -race ./...
       - run:
           name: Run build
-          command: bazel build //...
+          command: go build ./...
       - store_test_results:
          path: /tmp/test-reports

 # Invoke jobs via workflows
 # See: https://circleci.com/docs/2.0/configuration-reference/#workflows
+# Linting is disabled while cleanups are ongoing.
 workflows:
   testbuild:
     jobs:
       - testbuild
+      # - lint
.gitignore (vendored)
@@ -1,4 +1 @@
-bazel-bin
-bazel-goutils
-bazel-out
-bazel-testlogs
+.idea
.golangci.yml (new file, 477 lines)
@@ -0,0 +1,477 @@

# This file is licensed under the terms of the MIT license https://opensource.org/license/mit
# Copyright (c) 2021-2025 Marat Reymers

## Golden config for golangci-lint v2.6.2
#
# This is the best config for golangci-lint based on my experience and opinion.
# It is very strict, but not extremely strict.
# Feel free to adapt it to suit your needs.
# If this config helps you, please consider keeping a link to this file (see the next comment).

# Based on https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322

version: "2"

issues:
  # Maximum count of issues with the same text.
  # Set to 0 to disable.
  # Default: 3
  max-same-issues: 50

formatters:
  enable:
    - goimports # checks if the code and import statements are formatted according to the 'goimports' command
    - golines # checks if code is formatted, and fixes long lines

    ## you may want to enable
    #- gci # checks if code and import statements are formatted, with additional rules
    #- gofmt # checks if the code is formatted according to 'gofmt' command
    #- gofumpt # enforces a stricter format than 'gofmt', while being backwards compatible
    #- swaggo # formats swaggo comments

  # All settings can be found here https://github.com/golangci/golangci-lint/blob/HEAD/.golangci.reference.yml
  settings:
    goimports:
      # A list of prefixes, which, if set, checks import paths
      # with the given prefixes are grouped after 3rd-party packages.
      # Default: []
      local-prefixes:
        - github.com/my/project

    golines:
      # Target maximum line length.
      # Default: 100
      max-len: 120

linters:
  enable:
    - asasalint # checks for pass []any as any in variadic func(...any)
    - asciicheck # checks that your code does not contain non-ASCII identifiers
    - bidichk # checks for dangerous unicode character sequences
    - bodyclose # checks whether HTTP response body is closed successfully
    - canonicalheader # checks whether net/http.Header uses canonical header
    - copyloopvar # detects places where loop variables are copied (Go 1.22+)
    - cyclop # checks function and package cyclomatic complexity
    - depguard # checks if package imports are in a list of acceptable packages
    - dupl # tool for code clone detection
    - durationcheck # checks for two durations multiplied together
    - errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases
    - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
    - errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13
    - exhaustive # checks exhaustiveness of enum switch statements
    - exptostd # detects functions from golang.org/x/exp/ that can be replaced by std functions
    - fatcontext # detects nested contexts in loops
    - forbidigo # forbids identifiers
    - funcorder # checks the order of functions, methods, and constructors
    - funlen # tool for detection of long functions
    - gocheckcompilerdirectives # validates go compiler directive comments (//go:)
    - gochecksumtype # checks exhaustiveness on Go "sum types"
    - gocognit # computes and checks the cognitive complexity of functions
    - goconst # finds repeated strings that could be replaced by a constant
    - gocritic # provides diagnostics that check for bugs, performance and style issues
    - gocyclo # computes and checks the cyclomatic complexity of functions
    - godoclint # checks Golang's documentation practice
    - godot # checks if comments end in a period
    - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod
    - goprintffuncname # checks that printf-like functions are named with f at the end
    - gosec # inspects source code for security problems
    - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
    - iface # checks the incorrect use of interfaces, helping developers avoid interface pollution
    - ineffassign # detects when assignments to existing variables are not used
    - intrange # finds places where for loops could make use of an integer range
    - iotamixing # checks if iotas are being used in const blocks with other non-iota declarations
    - loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap)
    - makezero # finds slice declarations with non-zero initial length
    - mirror # reports wrong mirror patterns of bytes/strings usage
    - mnd # detects magic numbers
    - modernize # suggests simplifications to Go code, using modern language and library features
    - musttag # enforces field tags in (un)marshaled structs
    - nakedret # finds naked returns in functions greater than a specified function length
    - nestif # reports deeply nested if statements
    - nilerr # finds the code that returns nil even if it checks that the error is not nil
    - nilnesserr # reports that it checks for err != nil, but it returns a different nil value error (powered by nilness and nilerr)
    - nilnil # checks that there is no simultaneous return of nil error and an invalid value
    - noctx # finds sending http request without context.Context
    - nolintlint # reports ill-formed or insufficient nolint directives
    - nonamedreturns # reports all named returns
    - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL
    - perfsprint # checks that fmt.Sprintf can be replaced with a faster alternative
    - predeclared # finds code that shadows one of Go's predeclared identifiers
    - promlinter # checks Prometheus metrics naming via promlint
    - protogetter # reports direct reads from proto message fields when getters should be used
    - reassign # checks that package variables are not reassigned
    - recvcheck # checks for receiver type consistency
    - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint
    - rowserrcheck # checks whether Err of rows is checked successfully
    - sloglint # ensure consistent code style when using log/slog
    - spancheck # checks for mistakes with OpenTelemetry/Census spans
    - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed
    - staticcheck # is a go vet on steroids, applying a ton of static analysis checks
    - testableexamples # checks if examples are testable (have an expected output)
    - testifylint # checks usage of github.com/stretchr/testify
    - testpackage # makes you use a separate _test package
    - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes
    - unconvert # removes unnecessary type conversions
    - unparam # reports unused function parameters
    - unqueryvet # detects SELECT * in SQL queries and SQL builders, encouraging explicit column selection
    - unused # checks for unused constants, variables, functions and types
    - usestdlibvars # detects the possibility to use variables/constants from the Go standard library
    - usetesting # reports uses of functions with replacement inside the testing package
    - wastedassign # finds wasted assignment statements
    - whitespace # detects leading and trailing whitespace

    ## you may want to enable
    #- arangolint # opinionated best practices for arangodb client
    #- decorder # checks declaration order and count of types, constants, variables and functions
    #- exhaustruct # [highly recommend to enable] checks if all structure fields are initialized
    #- ginkgolinter # [if you use ginkgo/gomega] enforces standards of using ginkgo and gomega
    #- godox # detects usage of FIXME, TODO and other keywords inside comments
    #- goheader # checks is file header matches to pattern
    #- inamedparam # [great idea, but too strict, need to ignore a lot of cases by default] reports interfaces with unnamed method parameters
    #- interfacebloat # checks the number of methods inside an interface
    #- ireturn # accept interfaces, return concrete types
    #- noinlineerr # disallows inline error handling `if err := ...; err != nil {`
    #- prealloc # [premature optimization, but can be used in some cases] finds slice declarations that could potentially be preallocated
    #- tagalign # checks that struct tags are well aligned
    #- varnamelen # [great idea, but too many false positives] checks that the length of a variable's name matches its scope
    #- wrapcheck # checks that errors returned from external packages are wrapped
    #- zerologlint # detects the wrong usage of zerolog that a user forgets to dispatch zerolog.Event

    ## disabled
    #- containedctx # detects struct contained context.Context field
    #- contextcheck # [too many false positives] checks the function whether use a non-inherited context
    #- dogsled # checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
    #- dupword # [useless without config] checks for duplicate words in the source code
    #- err113 # [too strict] checks the errors handling expressions
    #- errchkjson # [don't see profit + I'm against of omitting errors like in the first example https://github.com/breml/errchkjson] checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted
    #- forcetypeassert # [replaced by errcheck] finds forced type assertions
    #- gomodguard # [use more powerful depguard] allow and block lists linter for direct Go module dependencies
    #- gosmopolitan # reports certain i18n/l10n anti-patterns in your Go codebase
    #- grouper # analyzes expression groups
    #- importas # enforces consistent import aliases
    #- lll # [replaced by golines] reports long lines
    #- maintidx # measures the maintainability index of each function
    #- misspell # [useless] finds commonly misspelled English words in comments
    #- nlreturn # [too strict and mostly code is not more readable] checks for a new line before return and branch statements to increase code clarity
    #- paralleltest # [too many false positives] detects missing usage of t.Parallel() method in your Go test
    #- tagliatelle # checks the struct tags
    #- thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers
    #- wsl # [too strict and mostly code is not more readable] whitespace linter forces you to use empty lines
    #- wsl_v5 # [too strict and mostly code is not more readable] add or remove empty lines

  # All settings can be found here https://github.com/golangci/golangci-lint/blob/HEAD/.golangci.reference.yml
  settings:
    cyclop:
      # The maximal code complexity to report.
      # Default: 10
      max-complexity: 30
      # The maximal average package complexity.
      # If it's higher than 0.0 (float) the check is enabled.
      # Default: 0.0
      package-average: 10.0

    depguard:
      # Rules to apply.
      #
      # Variables:
      # - File Variables
      # Use an exclamation mark `!` to negate a variable.
      # Example: `!$test` matches any file that is not a go test file.
      #
      # `$all` - matches all go files
      # `$test` - matches all go test files
      #
      # - Package Variables
      #
      # `$gostd` - matches all of go's standard library (Pulled from `GOROOT`)
      #
      # Default (applies if no custom rules are defined): Only allow $gostd in all files.
      rules:
        "deprecated":
          # List of file globs that will match this list of settings to compare against.
          # By default, if a path is relative, it is relative to the directory where the golangci-lint command is executed.
          # The placeholder '${base-path}' is substituted with a path relative to the mode defined with `run.relative-path-mode`.
          # The placeholder '${config-path}' is substituted with a path relative to the configuration file.
          # Default: $all
          files:
            - "$all"
          # List of packages that are not allowed.
          # Entries can be a variable (starting with $), a string prefix, or an exact match (if ending with $).
          # Default: []
          deny:
            - pkg: github.com/golang/protobuf
              desc: Use google.golang.org/protobuf instead, see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules
            - pkg: github.com/satori/go.uuid
              desc: Use github.com/google/uuid instead, satori's package is not maintained
            - pkg: github.com/gofrs/uuid$
              desc: Use github.com/gofrs/uuid/v5 or later, it was not a go module before v5
        "non-test files":
          files:
            - "!$test"
          deny:
            - pkg: math/rand$
              desc: Use math/rand/v2 instead, see https://go.dev/blog/randv2
        "non-main files":
          files:
            - "!**/main.go"
          deny:
            - pkg: log$
              desc: Use log/slog instead, see https://go.dev/blog/slog

    embeddedstructfieldcheck:
      # Checks that sync.Mutex and sync.RWMutex are not used as embedded fields.
      # Default: false
      forbid-mutex: true

    errcheck:
      # Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
      # Such cases aren't reported by default.
      # Default: false
      check-type-assertions: true
      exclude-functions:
        - (*git.wntrmute.dev/kyle/goutils/sbuf.Buffer).Write

    exhaustive:
      # Program elements to check for exhaustiveness.
      # Default: [ switch ]
      check:
        - switch
        - map

    exhaustruct:
      # List of regular expressions to match type names that should be excluded from processing.
      # Anonymous structs can be matched by '<anonymous>' alias.
      # Has precedence over `include`.
      # Each regular expression must match the full type name, including package path.
      # For example, to match type `net/http.Cookie` regular expression should be `.*/http\.Cookie`,
      # but not `http\.Cookie`.
      # Default: []
      exclude:
        # std libs
        - ^net/http.Client$
        - ^net/http.Cookie$
        - ^net/http.Request$
        - ^net/http.Response$
        - ^net/http.Server$
        - ^net/http.Transport$
        - ^net/url.URL$
        - ^os/exec.Cmd$
        - ^reflect.StructField$
        # public libs
        - ^github.com/Shopify/sarama.Config$
        - ^github.com/Shopify/sarama.ProducerMessage$
        - ^github.com/mitchellh/mapstructure.DecoderConfig$
        - ^github.com/prometheus/client_golang/.+Opts$
        - ^github.com/spf13/cobra.Command$
        - ^github.com/spf13/cobra.CompletionOptions$
        - ^github.com/stretchr/testify/mock.Mock$
        - ^github.com/testcontainers/testcontainers-go.+Request$
        - ^github.com/testcontainers/testcontainers-go.FromDockerfile$
        - ^golang.org/x/tools/go/analysis.Analyzer$
        - ^google.golang.org/protobuf/.+Options$
        - ^gopkg.in/yaml.v3.Node$
      # Allows empty structures in return statements.
      # Default: false
      allow-empty-returns: true

    funcorder:
      # Checks if the exported methods of a structure are placed before the non-exported ones.
      # Default: true
      struct-method: false

    funlen:
      # Checks the number of lines in a function.
      # If lower than 0, disable the check.
      # Default: 60
      lines: 100
      # Checks the number of statements in a function.
      # If lower than 0, disable the check.
      # Default: 40
      statements: 50

    gochecksumtype:
      # Presence of `default` case in switch statements satisfies exhaustiveness, if all members are not listed.
      # Default: true
      default-signifies-exhaustive: false

    gocognit:
      # Minimal code complexity to report.
      # Default: 30 (but we recommend 10-20)
      min-complexity: 20

    gocritic:
      # Settings passed to gocritic.
      # The settings key is the name of a supported gocritic checker.
      # The list of supported checkers can be found at https://go-critic.com/overview.
      settings:
        captLocal:
          # Whether to restrict checker to params only.
          # Default: true
          paramsOnly: false
        underef:
          # Whether to skip (*x).method() calls where x is a pointer receiver.
          # Default: true
          skipRecvDeref: false

    godoclint:
      # List of rules to enable in addition to the default set.
      # Default: empty
      enable:
        # Assert no unused link in godocs.
        # https://github.com/godoc-lint/godoc-lint?tab=readme-ov-file#no-unused-link
        - no-unused-link

    govet:
      # Enable all analyzers.
      # Default: false
      enable-all: true
      # Disable analyzers by name.
      # Run `GL_DEBUG=govet golangci-lint run --enable=govet` to see default, all available analyzers, and enabled analyzers.
      # Default: []
      disable:
        - fieldalignment # too strict
      # Settings per analyzer.
      settings:
        shadow:
          # Whether to be strict about shadowing; can be noisy.
          # Default: false
          strict: true

    inamedparam:
      # Skips check for interface methods with only a single parameter.
      # Default: false
      skip-single-param: true

    mnd:
      ignored-functions:
        - args.Error
        - flag.Arg
        - flag.Duration.*
        - flag.Float.*
        - flag.Int.*
        - flag.Uint.*
        - os.Chmod
        - os.Mkdir.*
        - os.OpenFile
        - os.WriteFile
        - prometheus.ExponentialBuckets.*
        - prometheus.LinearBuckets

    nakedret:
      # Make an issue if func has more lines of code than this setting, and it has naked returns.
      # Default: 30
      max-func-lines: 0

    nolintlint:
      # Exclude the following linters from requiring an explanation.
      # Default: []
      allow-no-explanation: [ funlen, gocognit, golines ]
      # Enable to require an explanation of nonzero length after each nolint directive.
      # Default: false
      require-explanation: true
      # Enable to require nolint directives to mention the specific linter being suppressed.
      # Default: false
      require-specific: true

    perfsprint:
      # Optimizes into strings concatenation.
      # Default: true
      strconcat: false

    reassign:
      # Patterns for global variable names that are checked for reassignment.
      # See https://github.com/curioswitch/go-reassign#usage
      # Default: ["EOF", "Err.*"]
      patterns:
        - ".*"

    rowserrcheck:
      # database/sql is always checked.
      # Default: []
      packages:
        - github.com/jmoiron/sqlx

    sloglint:
      # Enforce not using global loggers.
      # Values:
      # - "": disabled
      # - "all": report all global loggers
      # - "default": report only the default slog logger
      # https://github.com/go-simpler/sloglint?tab=readme-ov-file#no-global
      # Default: ""
      no-global: all
      # Enforce using methods that accept a context.
      # Values:
      # - "": disabled
      # - "all": report all contextless calls
      # - "scope": report only if a context exists in the scope of the outermost function
      # https://github.com/go-simpler/sloglint?tab=readme-ov-file#context-only
      # Default: ""
      context: scope

    staticcheck:
      # SAxxxx checks in https://staticcheck.dev/docs/configuration/options/#checks
      # Example (to disable some checks): [ "all", "-SA1000", "-SA1001"]
      # Default: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"]
      checks:
        - all
        # Incorrect or missing package comment.
        # https://staticcheck.dev/docs/checks/#ST1000
        - -ST1000
        # Use consistent method receiver names.
        # https://staticcheck.dev/docs/checks/#ST1016
        - -ST1016
        # Omit embedded fields from selector expression.
        # https://staticcheck.dev/docs/checks/#QF1008
        - -QF1008

    usetesting:
      # Enable/disable `os.TempDir()` detections.
      # Default: false
      os-temp-dir: true

  exclusions:
    # Log a warning if an exclusion rule is unused.
    # Default: false
    warn-unused: true
    # Predefined exclusion rules.
    # Default: []
    presets:
      - std-error-handling
      - common-false-positives
    rules:
      - path: 'ahash/ahash.go'
        linters: [ staticcheck, gosec ]
      - path: 'backoff/backoff_test.go'
        linters: [ testpackage ]
      - path: 'dbg/dbg_test.go'
        linters: [ testpackage ]
      - path: 'log/logger.go'
        linters: [ forbidigo ]
      - path: 'logging/example_test.go'
        linters: [ testableexamples ]
      - path: 'main.go'
        linters: [ forbidigo, mnd, reassign ]
      - source: 'TODO'
        linters: [ godot ]
      - text: 'should have a package comment'
        linters: [ revive ]
      - text: 'exported \S+ \S+ should have comment( \(or a comment on this block\))? or be unexported'
        linters: [ revive ]
      - text: 'package comment should be of the form ".+"'
        source: '// ?(nolint|TODO)'
        linters: [ revive ]
      - text: 'comment on exported \S+ \S+ should be of the form ".+"'
        source: '// ?(nolint|TODO)'
        linters: [ revive, staticcheck ]
      - path: '_test\.go'
        linters:
          - bodyclose
          - dupl
          - errcheck
          - funlen
          - goconst
          - gosec
          - noctx
          - reassign
          - wrapcheck
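As a point of reference, here is a minimal, hypothetical Go snippet (not taken from this repository) showing code that would satisfy a few of the rules configured above: the depguard denials steer code toward `math/rand/v2` and `log/slog`, and `nolintlint` requires any suppression to name the linter and carry an explanation.

```go
// Hypothetical example only; nothing here is repo code.
package main

import (
	"log/slog"     // preferred over the denied "log" package outside main.go
	"math/rand/v2" // preferred over the denied "math/rand"
)

func main() {
	// The nolint directive names the specific linter and explains itself,
	// as nolintlint's require-specific/require-explanation settings demand.
	n := rand.IntN(6) + 1 //nolint:mnd // a die roll is inherently six-sided
	slog.Info("rolled", "value", n)
}
```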
.travis.yml (deleted)
@@ -1,26 +0,0 @@
-arch:
-  - amd64
-  - ppc64le
-sudo: false
-language: go
-go:
-  - tip
-  - 1.9
-jobs:
-  exclude:
-    - go: 1.9
-      arch: amd64
-    - go: 1.9
-      arch: ppc64le
-script:
-  - go get golang.org/x/lint/golint
-  - go get golang.org/x/tools/cmd/cover
-  - go get github.com/kisom/goutils/...
-  - go test -cover github.com/kisom/goutils/...
-  - golint github.com/kisom/goutils/...
-notifications:
-  email:
-    recipients:
-      - coder@kyleisom.net
-    on_success: change
-    on_failure: change
CHANGELOG
@@ -1,27 +1,59 @@
-Release 1.2.1 - 2018-09-15
-
-+ Add missing format argument to Errorf call in kgz.
-
-Release 1.2.0 - 2018-09-15
-
-+ Adds the kgz command line utility.
-
-Release 1.1.0 - 2017-11-16
-
-+ A number of new command line utilities were added
-
-+ atping
-+ cruntar
-+ renfnv
-+
-+ ski
-+ subjhash
-+ yamll
-
-+ new package: ahash
-+ package for loading hashes from an algorithm string
-+ new certificate loading functions in the lib package
-+ new package: tee
-+ emulates tee(1)
+CHANGELOG
+
+v1.11.0 - 2025-11-15
+
+Added
+- cache/mru: introduce MRU cache implementation with timestamp utilities.
+
+Changed
+- certlib: complete overhaul to simplify APIs and internals.
+- repo: widespread linting cleanups across many packages (config, dbg, die,
+  fileutil, log/logging, mwc, sbuf, seekbuf, tee, testio, etc.).
+- cmd: general program cleanups; `cert-bundler` lint fixes.
+
+Removed
+- rand: remove unused package.
+- testutil: remove unused code.
+
+v1.10.1 — 2025-11-15
+
+Changed
+- certlib: major overhaul and refactor.
+- repo: linter autofixes ahead of release.
+
+v1.10.0 — 2025-11-14
+
+Added
+- cmd: add `cert-revcheck` command.
+
+Changed
+- ci/lint: add golangci-lint stage and initial cleanup.
+
+v1.9.1 — 2025-11-15
+
+Fixed
+- die: correct calls to `die.With`.
+
+v1.9.0 — 2025-11-14
+
+Added
+- cmd: add `cert-bundler` tool.
+
+Changed
+- misc: minor updates and maintenance.
+
+v1.8.1 — 2025-11-14
+
+Added
+- cmd: add `tlsinfo` tool.
+
+v1.8.0 — 2025-11-14
+
+Baseline
+- Initial baseline for this changelog series.
LICENSE
@@ -1,19 +1,194 @@
-Copyright (c) 2015-2023 Kyle Isom <kyle@tyrfingr.is>
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+Copyright 2025 K. Isom <kyle@imap.cc>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction,
+and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by
+the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all
+other entities that control, are controlled by, or are under common
+control with that entity. For the purposes of this definition,
+"control" means (i) the power, direct or indirect, to cause the
+direction or management of such entity, whether by contract or
+otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity
+exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation
+source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical
+transformation or translation of a Source form, including but
+not limited to compiled object code, generated documentation,
+and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or
+Object form, made available under the License, as indicated by a
+copyright notice that is included in or attached to the work
+(an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object
+form, that is based on (or derived from) the Work and for which the
+editorial revisions, annotations, elaborations, or other modifications
+represent, as a whole, an original work of authorship. For the purposes
+of this License, Derivative Works shall not include works that remain
+separable from, or merely link (or bind by name) to the interfaces of,
+the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including
+the original version of the Work and any modifications or additions
+to that Work or Derivative Works thereof, that is intentionally
+submitted to Licensor for inclusion in the Work by the copyright owner
+or by an individual or Legal Entity authorized to submit on behalf of
+the copyright owner. For the purposes of this definition, "submitted"
+means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems,
+and issue tracking systems that are managed by, or on behalf of, the
+Licensor for the purpose of discussing and improving the Work, but
+excluding communication that is conspicuously marked or otherwise
+designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity
+on behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the
+Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+(except as stated in this section) patent license to make, have made,
+use, offer to sell, sell, import, and otherwise transfer the Work,
+where such license applies only to those patent claims licensable
+by such Contributor that are necessarily infringed by their
+Contribution(s) alone or by combination of their Contribution(s)
+with the Work to which such Contribution(s) was submitted. If You
+institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work
+or a Contribution incorporated within the Work constitutes direct
+or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate
+as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+Work or Derivative Works thereof in any medium, with or without
+modifications, and in Source or Object form, provided that You
+meet the following conditions:
+
+(a) You must give any other recipients of the Work or
+Derivative Works a copy of this License; and
+
+(b) You must cause any modified files to carry prominent notices
+stating that You changed the files; and
+
+(c) You must retain, in the Source form of any Derivative Works
+that You distribute, all copyright, patent, trademark, and
+attribution notices from the Source form of the Work,
+excluding those notices that do not pertain to any part of
+the Derivative Works; and
+
+(d) If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must
+include a readable copy of the attribution notices contained
+within such NOTICE file, excluding those notices that do not
+pertain to any part of the Derivative Works, in at least one
+of the following places: within a NOTICE text file distributed
+as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or,
+within a display generated by the Derivative Works, if and
+wherever such third-party notices normally appear. The contents
+of the NOTICE file are for informational purposes only and
+do not modify the License. You may add Your own attribution
+notices within Derivative Works that You distribute, alongside
+or as an addendum to the NOTICE text from the Work, provided
+that such additional attribution notices cannot be construed
+as modifying the License.
+
+You may add Your own copyright statement to Your modifications and
+may provide additional or different license terms and conditions
+for use, reproduction, or distribution of Your modifications, or
+for any such Derivative Works as a whole, provided Your use,
+reproduction, and distribution of the Work otherwise complies with
+the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+any Contribution intentionally submitted for inclusion in the Work
+by You to the Licensor shall be under the terms and conditions of
+this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify
+the terms of any separate license agreement you may have executed
+with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor,
+except as required for reasonable and customary use in describing the
+origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+agreed to in writing, Licensor provides the Work (and each
+Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied, including, without limitation, any warranties or conditions
+of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any
+risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+whether in tort (including negligence), contract, or otherwise,
+unless required by applicable law (such as deliberate and grossly
+negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special,
+incidental, or consequential damages of any character arising as a
+result of this License or out of the use or inability to use the
+Work (including but not limited to damages for loss of goodwill,
+work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor
+has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+the Work or Derivative Works thereof, You may choose to offer,
+and charge a fee for, acceptance of support, warranty, indemnity,
+or other liability obligations and/or rights consistent with this
+License. However, in accepting such obligations, You may act only
+on Your own behalf and on Your sole responsibility, not on behalf
+of any other Contributor, and only if You agree to indemnify,
+defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason
+of your accepting any such warranty or additional liability.
+
 =======================================================================

 The backoff package (written during my time at Cloudflare) is released
 under the following license:
README.md
@@ -78,3 +78,43 @@ Each program should have a small README in the directory with more
 information.

 All code here is licensed under the ISC license.
+
+
+Error handling
+--------------
+
+This repo standardizes on Go 1.13+ error wrapping and matching. Libraries and
+CLIs should:
+
+- Wrap causes with context using `fmt.Errorf("context: %w", err)`.
+- Use typed, structured errors from `certlib/certerr` for certificate-related
+  operations. These include a typed `*certerr.Error` with `Source` and `Kind`.
+- Match errors programmatically:
+  - `errors.Is(err, certerr.ErrEncryptedPrivateKey)` to detect sentinel states.
+  - `errors.As(err, &e)` (where `var e *certerr.Error`) to inspect
+    `e.Source`/`e.Kind`.
+
+Examples:
+
+```
+cert, err := certlib.LoadCertificate(path)
+if err != nil {
+    // sentinel match
+    if errors.Is(err, certerr.ErrEmptyCertificate) {
+        // handle empty input
+    }
+
+    // typed error match
+    var ce *certerr.Error
+    if errors.As(err, &ce) {
+        switch ce.Kind {
+        case certerr.KindParse:
+            // parse error handling
+        case certerr.KindLoad:
+            // file loading error handling
+        }
+    }
+}
+```
+
+Avoid including sensitive data (keys, passwords, tokens) in error messages.
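To complement the matching example added to the README above, here is a small illustrative sketch of the wrapping side of that convention; `loadKeyPair` and its error message are hypothetical, not APIs from this repository.

```go
// Hypothetical sketch of the `fmt.Errorf("context: %w", err)` convention;
// callers can still use errors.Is / errors.As on the wrapped cause.
package main

import (
	"crypto/tls"
	"fmt"
)

func loadKeyPair(certPath, keyPath string) (tls.Certificate, error) {
	cert, err := tls.LoadX509KeyPair(certPath, keyPath)
	if err != nil {
		// Add context while preserving the underlying error for matching.
		return tls.Certificate{}, fmt.Errorf("loading key pair %s: %w", certPath, err)
	}
	return cert, nil
}

func main() {
	if _, err := loadKeyPair("cert.pem", "key.pem"); err != nil {
		fmt.Println(err)
	}
}
```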
ahash/ahash.go
@@ -4,8 +4,8 @@
 package ahash

 import (
-	"crypto/md5"
-	"crypto/sha1"
+	"crypto/md5"  // #nosec G505
+	"crypto/sha1" // #nosec G501
 	"crypto/sha256"
 	"crypto/sha512"
 	"errors"
@@ -17,34 +17,15 @@ import (
 	"io"
 	"sort"

-	"git.wntrmute.dev/kyle/goutils/assert"
 	"golang.org/x/crypto/blake2b"
 	"golang.org/x/crypto/blake2s"
-	"golang.org/x/crypto/md4"
-	"golang.org/x/crypto/ripemd160"
+	"golang.org/x/crypto/md4"       // #nosec G506
+	"golang.org/x/crypto/ripemd160" // #nosec G507
 	"golang.org/x/crypto/sha3"
+
+	"git.wntrmute.dev/kyle/goutils/assert"
 )

-func sha224Slicer(bs []byte) []byte {
-	sum := sha256.Sum224(bs)
-	return sum[:]
-}
-
-func sha256Slicer(bs []byte) []byte {
-	sum := sha256.Sum256(bs)
-	return sum[:]
-}
-
-func sha384Slicer(bs []byte) []byte {
-	sum := sha512.Sum384(bs)
-	return sum[:]
-}
-
-func sha512Slicer(bs []byte) []byte {
-	sum := sha512.Sum512(bs)
-	return sum[:]
-}
-
 // Hash represents a generic hash function that may or may not be secure. It
 // satisfies the hash.Hash interface.
 type Hash struct {
@@ -247,17 +228,17 @@ func init() {
 // HashList returns a sorted list of all the hash algorithms supported by the
 // package.
 func HashList() []string {
-	return hashList[:]
+	return hashList
 }

 // SecureHashList returns a sorted list of all the secure (cryptographic) hash
 // algorithms supported by the package.
 func SecureHashList() []string {
-	return secureHashList[:]
+	return secureHashList
 }

 // InsecureHashList returns a sorted list of all the insecure hash algorithms
 // supported by the package.
 func InsecureHashList() []string {
-	return insecureHashList[:]
+	return insecureHashList
 }
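A note on the `hashList[:]` change above: returning `hashList` directly only compiles because the variable is already a slice, and for a slice the full slice expression `s[:]` is a view of the same backing array with the same length and capacity, so dropping it changes nothing observable. A quick standalone illustration (the variable name is only for the example):

```go
package main

import "fmt"

func main() {
	hashList := []string{"md5", "sha1", "sha256"}
	same := hashList[:] // for a slice, s[:] shares the same backing array, length, and capacity
	fmt.Println(&hashList[0] == &same[0], len(hashList) == len(same), cap(hashList) == cap(same))
	// Output: true true true
}
```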
@@ -1,16 +1,18 @@
|
|||||||
package ahash
|
package ahash_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/kyle/goutils/ahash"
|
||||||
"git.wntrmute.dev/kyle/goutils/assert"
|
"git.wntrmute.dev/kyle/goutils/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestSecureHash(t *testing.T) {
|
func TestSecureHash(t *testing.T) {
|
||||||
algo := "sha256"
|
algo := "sha256"
|
||||||
h, err := New(algo)
|
h, err := ahash.New(algo)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, h.IsSecure(), algo+" should be a secure hash")
|
assert.BoolT(t, h.IsSecure(), algo+" should be a secure hash")
|
||||||
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
|
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
|
||||||
@@ -19,28 +21,28 @@ func TestSecureHash(t *testing.T) {
|
|||||||
|
|
||||||
var data []byte
|
var data []byte
|
||||||
var expected = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
var expected = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||||
sum, err := Sum(algo, data)
|
sum, err := ahash.Sum(algo, data)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, fmt.Sprintf("%x", sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
|
assert.BoolT(t, hex.EncodeToString(sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
|
||||||
|
|
||||||
data = []byte("hello, world")
|
data = []byte("hello, world")
|
||||||
buf := bytes.NewBuffer(data)
|
buf := bytes.NewBuffer(data)
|
||||||
expected = "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
|
expected = "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
|
||||||
sum, err = SumReader(algo, buf)
|
sum, err = ahash.SumReader(algo, buf)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, fmt.Sprintf("%x", sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
|
assert.BoolT(t, hex.EncodeToString(sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
|
||||||
|
|
||||||
data = []byte("hello world")
|
data = []byte("hello world")
|
||||||
_, err = h.Write(data)
|
_, err = h.Write(data)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
unExpected := "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
|
unExpected := "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
|
||||||
sum = h.Sum(nil)
|
sum = h.Sum(nil)
|
||||||
assert.BoolT(t, fmt.Sprintf("%x", sum) != unExpected, fmt.Sprintf("hash shouldn't have returned %x", unExpected))
|
assert.BoolT(t, hex.EncodeToString(sum) != unExpected, fmt.Sprintf("hash shouldn't have returned %x", unExpected))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInsecureHash(t *testing.T) {
|
func TestInsecureHash(t *testing.T) {
|
||||||
algo := "md5"
|
algo := "md5"
|
||||||
h, err := New(algo)
|
h, err := ahash.New(algo)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
|
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
|
||||||
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
|
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
|
||||||
@@ -49,28 +51,28 @@ func TestInsecureHash(t *testing.T) {
|
|||||||
|
|
||||||
var data []byte
|
var data []byte
|
||||||
var expected = "d41d8cd98f00b204e9800998ecf8427e"
|
var expected = "d41d8cd98f00b204e9800998ecf8427e"
|
||||||
sum, err := Sum(algo, data)
|
sum, err := ahash.Sum(algo, data)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, fmt.Sprintf("%x", sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
|
assert.BoolT(t, hex.EncodeToString(sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
|
||||||
|
|
||||||
data = []byte("hello, world")
|
data = []byte("hello, world")
|
||||||
buf := bytes.NewBuffer(data)
|
buf := bytes.NewBuffer(data)
|
||||||
expected = "e4d7f1b4ed2e42d15898f4b27b019da4"
|
expected = "e4d7f1b4ed2e42d15898f4b27b019da4"
|
||||||
sum, err = SumReader(algo, buf)
|
sum, err = ahash.SumReader(algo, buf)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, fmt.Sprintf("%x", sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
|
assert.BoolT(t, hex.EncodeToString(sum) == expected, fmt.Sprintf("expected hash %s but have %x", expected, sum))
|
||||||
|
|
||||||
data = []byte("hello world")
|
data = []byte("hello world")
|
||||||
_, err = h.Write(data)
|
_, err = h.Write(data)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
unExpected := "e4d7f1b4ed2e42d15898f4b27b019da4"
|
unExpected := "e4d7f1b4ed2e42d15898f4b27b019da4"
|
||||||
sum = h.Sum(nil)
|
sum = h.Sum(nil)
|
||||||
assert.BoolT(t, fmt.Sprintf("%x", sum) != unExpected, fmt.Sprintf("hash shouldn't have returned %x", unExpected))
|
assert.BoolT(t, hex.EncodeToString(sum) != unExpected, fmt.Sprintf("hash shouldn't have returned %x", unExpected))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHash32(t *testing.T) {
|
func TestHash32(t *testing.T) {
|
||||||
algo := "crc32-ieee"
|
algo := "crc32-ieee"
|
||||||
h, err := New(algo)
|
h, err := ahash.New(algo)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
|
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
|
||||||
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
|
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
|
||||||
@@ -102,7 +104,7 @@ func TestHash32(t *testing.T) {
|
|||||||
|
|
||||||
func TestHash64(t *testing.T) {
|
func TestHash64(t *testing.T) {
|
||||||
algo := "crc64"
|
algo := "crc64"
|
||||||
h, err := New(algo)
|
h, err := ahash.New(algo)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
|
assert.BoolT(t, !h.IsSecure(), algo+" shouldn't be a secure hash")
|
||||||
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
|
assert.BoolT(t, h.HashAlgo() == algo, "hash returned the wrong HashAlgo")
|
||||||
@@ -133,9 +135,9 @@ func TestHash64(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestListLengthSanity(t *testing.T) {
|
func TestListLengthSanity(t *testing.T) {
|
||||||
all := HashList()
|
all := ahash.HashList()
|
||||||
secure := SecureHashList()
|
secure := ahash.SecureHashList()
|
||||||
insecure := InsecureHashList()
|
insecure := ahash.InsecureHashList()
|
||||||
|
|
||||||
assert.BoolT(t, len(all) == len(secure)+len(insecure))
|
assert.BoolT(t, len(all) == len(secure)+len(insecure))
|
||||||
}
|
}
|
||||||
@@ -146,11 +148,11 @@ func TestSumLimitedReader(t *testing.T) {
|
|||||||
extendedData := bytes.NewBufferString("hello, world! this is an extended message")
|
extendedData := bytes.NewBufferString("hello, world! this is an extended message")
|
||||||
expected := "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
|
expected := "09ca7e4eaa6e8ae9c7d261167129184883644d07dfba7cbfbc4c8a2e08360d5b"
|
||||||
|
|
||||||
hash, err := SumReader("sha256", data)
|
hash, err := ahash.SumReader("sha256", data)
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
assert.BoolT(t, fmt.Sprintf("%x", hash) == expected, fmt.Sprintf("have hash %x, want %s", hash, expected))
|
assert.BoolT(t, hex.EncodeToString(hash) == expected, fmt.Sprintf("have hash %x, want %s", hash, expected))
|
||||||
|
|
||||||
extendedHash, err := SumLimitedReader("sha256", extendedData, int64(dataLen))
|
extendedHash, err := ahash.SumLimitedReader("sha256", extendedData, int64(dataLen))
|
||||||
assert.NoErrorT(t, err)
|
assert.NoErrorT(t, err)
|
||||||
|
|
||||||
assert.BoolT(t, bytes.Equal(hash, extendedHash), fmt.Sprintf("have hash %x, want %x", extendedHash, hash))
|
assert.BoolT(t, bytes.Equal(hash, extendedHash), fmt.Sprintf("have hash %x, want %x", extendedHash, hash))
|
||||||
|
|||||||
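The hash test changes above move the callers to package-qualified names (ahash.New, ahash.Sum, ahash.SumReader) and to hex.EncodeToString for comparing digests. As a rough sketch of how this API is used from another package, assuming the import path git.wntrmute.dev/kyle/goutils/ahash and the Sum/SumReader signatures shown in the diff:

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/ahash" // assumed import path
)

func main() {
	// Hash a byte slice directly with a named algorithm.
	sum, err := ahash.Sum("md5", []byte("hello, world"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(sum))

	// Hash from an io.Reader without buffering the whole input.
	sum, err = ahash.SumReader("sha256", bytes.NewBufferString("hello, world"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(sum))
}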
@@ -9,6 +9,7 @@
|
|||||||
package assert
|
package assert
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
@@ -16,11 +17,13 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const callerSkip = 2
|
||||||
|
|
||||||
// NoDebug can be set to true to cause all asserts to be ignored.
|
// NoDebug can be set to true to cause all asserts to be ignored.
|
||||||
var NoDebug bool
|
var NoDebug bool
|
||||||
|
|
||||||
func die(what string, a ...string) {
|
func die(what string, a ...string) {
|
||||||
_, file, line, ok := runtime.Caller(2)
|
_, file, line, ok := runtime.Caller(callerSkip)
|
||||||
if !ok {
|
if !ok {
|
||||||
panic(what)
|
panic(what)
|
||||||
}
|
}
|
||||||
@@ -31,7 +34,8 @@ func die(what string, a ...string) {
|
|||||||
s = ": " + s
|
s = ": " + s
|
||||||
}
|
}
|
||||||
panic(what + s)
|
panic(what + s)
|
||||||
} else {
|
}
|
||||||
|
|
||||||
fmt.Fprintf(os.Stderr, "%s", what)
|
fmt.Fprintf(os.Stderr, "%s", what)
|
||||||
if len(a) > 0 {
|
if len(a) > 0 {
|
||||||
s := strings.Join(a, ", ")
|
s := strings.Join(a, ", ")
|
||||||
@@ -44,16 +48,17 @@ func die(what string, a ...string) {
|
|||||||
|
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Bool asserts that cond is false.
|
// Bool asserts that cond is false.
|
||||||
//
|
//
|
||||||
// For example, this would replace
|
// For example, this would replace
|
||||||
|
//
|
||||||
// if x < 0 {
|
// if x < 0 {
|
||||||
// log.Fatal("x is subzero")
|
// log.Fatal("x is subzero")
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// The same assertion would be
|
// The same assertion would be
|
||||||
|
//
|
||||||
// assert.Bool(x, "x is subzero")
|
// assert.Bool(x, "x is subzero")
|
||||||
func Bool(cond bool, s ...string) {
|
func Bool(cond bool, s ...string) {
|
||||||
if NoDebug {
|
if NoDebug {
|
||||||
@@ -68,6 +73,7 @@ func Bool(cond bool, s ...string) {
|
|||||||
// Error asserts that err is not nil, e.g. that an error has occurred.
|
// Error asserts that err is not nil, e.g. that an error has occurred.
|
||||||
//
|
//
|
||||||
// For example,
|
// For example,
|
||||||
|
//
|
||||||
// if err == nil {
|
// if err == nil {
|
||||||
// log.Fatal("call to <something> should have failed")
|
// log.Fatal("call to <something> should have failed")
|
||||||
// }
|
// }
|
||||||
@@ -100,7 +106,7 @@ func NoError(err error, s ...string) {
|
|||||||
|
|
||||||
// ErrorEq asserts that the actual error is the expected error.
|
// ErrorEq asserts that the actual error is the expected error.
|
||||||
func ErrorEq(expected, actual error) {
|
func ErrorEq(expected, actual error) {
|
||||||
if NoDebug || (expected == actual) {
|
if NoDebug || (errors.Is(expected, actual)) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -155,7 +161,7 @@ func NoErrorT(t *testing.T, err error) {
|
|||||||
// ErrorEqT compares a pair of errors, calling Fatal on it if they
|
// ErrorEqT compares a pair of errors, calling Fatal on it if they
|
||||||
// don't match.
|
// don't match.
|
||||||
func ErrorEqT(t *testing.T, expected, actual error) {
|
func ErrorEqT(t *testing.T, expected, actual error) {
|
||||||
if NoDebug || (expected == actual) {
|
if NoDebug || (errors.Is(expected, actual)) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
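The assert changes above replace pointer comparison of errors with errors.Is and name the caller-skip depth. A minimal sketch of how the test-oriented helpers read after this change, assuming the package path git.wntrmute.dev/kyle/goutils/assert and the NoErrorT/ErrorEqT/BoolT helpers shown in the diff:

package demo

import (
	"errors"
	"testing"

	"git.wntrmute.dev/kyle/goutils/assert" // assumed import path
)

var errNotFound = errors.New("not found")

// lookup is a stand-in for the code under test.
func lookup(key string) error {
	if key == "missing" {
		return errNotFound
	}
	return nil
}

func TestLookup(t *testing.T) {
	assert.NoErrorT(t, lookup("present"))
	// After this change, ErrorEqT matches errors via errors.Is rather than
	// simple equality, so wrapped sentinels are still recognised.
	assert.ErrorEqT(t, errNotFound, lookup("missing"))
	assert.BoolT(t, lookup("missing") != nil, "lookup of a missing key should fail")
}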
@@ -10,29 +10,21 @@
|
|||||||
// backoff is configured with a maximum duration that will not be
|
// backoff is configured with a maximum duration that will not be
|
||||||
// exceeded.
|
// exceeded.
|
||||||
//
|
//
|
||||||
// The `New` function will attempt to use the system's cryptographic
|
// This package uses math/rand/v2 for jitter, which is automatically
|
||||||
// random number generator to seed a Go math/rand random number
|
// seeded from a cryptographically secure source.
|
||||||
// source. If this fails, the package will panic on startup.
|
|
||||||
package backoff
|
package backoff
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
|
||||||
"encoding/binary"
|
|
||||||
"io"
|
|
||||||
"math"
|
"math"
|
||||||
mrand "math/rand"
|
"math/rand/v2"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
var prngMu sync.Mutex
|
|
||||||
var prng *mrand.Rand
|
|
||||||
|
|
||||||
// DefaultInterval is used when a Backoff is initialised with a
|
// DefaultInterval is used when a Backoff is initialised with a
|
||||||
// zero-value Interval.
|
// zero-value Interval.
|
||||||
var DefaultInterval = 5 * time.Minute
|
var DefaultInterval = 5 * time.Minute
|
||||||
|
|
||||||
// DefaultMaxDuration is maximum amount of time that the backoff will
|
// DefaultMaxDuration is the maximum amount of time that the backoff will
|
||||||
// delay for.
|
// delay for.
|
||||||
var DefaultMaxDuration = 6 * time.Hour
|
var DefaultMaxDuration = 6 * time.Hour
|
||||||
|
|
||||||
@@ -50,10 +42,9 @@ type Backoff struct {
|
|||||||
// interval controls the time step for backing off.
|
// interval controls the time step for backing off.
|
||||||
interval time.Duration
|
interval time.Duration
|
||||||
|
|
||||||
// noJitter controls whether to use the "Full Jitter"
|
// noJitter controls whether to use the "Full Jitter" improvement to attempt
|
||||||
// improvement to attempt to smooth out spikes in a high
|
// to smooth out spikes in a high-contention scenario. If noJitter is set to
|
||||||
// contention scenario. If noJitter is set to true, no
|
// true, no jitter will be introduced.
|
||||||
// jitter will be introduced.
|
|
||||||
noJitter bool
|
noJitter bool
|
||||||
|
|
||||||
// decay controls the decay of n. If it is non-zero, n is
|
// decay controls the decay of n. If it is non-zero, n is
|
||||||
@@ -65,17 +56,17 @@ type Backoff struct {
|
|||||||
lastTry time.Time
|
lastTry time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new backoff with the specified max duration and
|
// New creates a new backoff with the specified maxDuration duration and
|
||||||
// interval. Zero values may be used to use the default values.
|
// interval. Zero values may be used to use the default values.
|
||||||
//
|
//
|
||||||
// Panics if either max or interval is negative.
|
// Panics if either dMax or interval is negative.
|
||||||
func New(max time.Duration, interval time.Duration) *Backoff {
|
func New(dMax time.Duration, interval time.Duration) *Backoff {
|
||||||
if max < 0 || interval < 0 {
|
if dMax < 0 || interval < 0 {
|
||||||
panic("backoff: max or interval is negative")
|
panic("backoff: dMax or interval is negative")
|
||||||
}
|
}
|
||||||
|
|
||||||
b := &Backoff{
|
b := &Backoff{
|
||||||
maxDuration: max,
|
maxDuration: dMax,
|
||||||
interval: interval,
|
interval: interval,
|
||||||
}
|
}
|
||||||
b.setup()
|
b.setup()
|
||||||
@@ -84,27 +75,12 @@ func New(max time.Duration, interval time.Duration) *Backoff {
|
|||||||
|
|
||||||
// NewWithoutJitter works similarly to New, except that the created
|
// NewWithoutJitter works similarly to New, except that the created
|
||||||
// Backoff will not use jitter.
|
// Backoff will not use jitter.
|
||||||
func NewWithoutJitter(max time.Duration, interval time.Duration) *Backoff {
|
func NewWithoutJitter(dMax time.Duration, interval time.Duration) *Backoff {
|
||||||
b := New(max, interval)
|
b := New(dMax, interval)
|
||||||
b.noJitter = true
|
b.noJitter = true
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
|
||||||
var buf [8]byte
|
|
||||||
var n int64
|
|
||||||
|
|
||||||
_, err := io.ReadFull(rand.Reader, buf[:])
|
|
||||||
if err != nil {
|
|
||||||
panic(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
n = int64(binary.LittleEndian.Uint64(buf[:]))
|
|
||||||
|
|
||||||
src := mrand.NewSource(n)
|
|
||||||
prng = mrand.New(src)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Backoff) setup() {
|
func (b *Backoff) setup() {
|
||||||
if b.interval == 0 {
|
if b.interval == 0 {
|
||||||
b.interval = DefaultInterval
|
b.interval = DefaultInterval
|
||||||
@@ -122,35 +98,44 @@ func (b *Backoff) Duration() time.Duration {
|
|||||||
|
|
||||||
b.decayN()
|
b.decayN()
|
||||||
|
|
||||||
t := b.duration(b.n)
|
d := b.duration(b.n)
|
||||||
|
|
||||||
if b.n < math.MaxUint64 {
|
if b.n < math.MaxUint64 {
|
||||||
b.n++
|
b.n++
|
||||||
}
|
}
|
||||||
|
|
||||||
if !b.noJitter {
|
if !b.noJitter {
|
||||||
prngMu.Lock()
|
d = time.Duration(rand.Int64N(int64(d))) // #nosec G404
|
||||||
t = time.Duration(prng.Int63n(int64(t)))
|
|
||||||
prngMu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return t
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const maxN uint64 = 63
|
||||||
|
|
||||||
// requires b to be locked.
|
// requires b to be locked.
|
||||||
func (b *Backoff) duration(n uint64) (t time.Duration) {
|
func (b *Backoff) duration(n uint64) time.Duration {
|
||||||
// Saturate pow
|
// Use left shift on the underlying integer representation to avoid
|
||||||
pow := time.Duration(math.MaxInt64)
|
// multiplying time.Duration by time.Duration (which is semantically
|
||||||
if n < 63 {
|
// incorrect and flagged by linters).
|
||||||
pow = 1 << n
|
if n >= maxN {
|
||||||
|
// Saturate when n would overflow a 64-bit shift or exceed maxDuration.
|
||||||
|
return b.maxDuration
|
||||||
}
|
}
|
||||||
|
|
||||||
t = b.interval * pow
|
// Calculate 2^n * interval using a shift. Detect overflow by checking
|
||||||
if t/pow != b.interval || t > b.maxDuration {
|
// for sign change or monotonicity loss and clamp to maxDuration.
|
||||||
t = b.maxDuration
|
shifted := b.interval << n
|
||||||
|
if shifted < 0 || shifted < b.interval {
|
||||||
|
// Overflow occurred during the shift; clamp to maxDuration.
|
||||||
|
return b.maxDuration
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
if shifted > b.maxDuration {
|
||||||
|
return b.maxDuration
|
||||||
|
}
|
||||||
|
|
||||||
|
return shifted
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset resets the attempt counter of a backoff.
|
// Reset resets the attempt counter of a backoff.
|
||||||
@@ -174,7 +159,7 @@ func (b *Backoff) SetDecay(decay time.Duration) {
|
|||||||
b.decay = decay
|
b.decay = decay
|
||||||
}
|
}
|
||||||
|
|
||||||
// requires b to be locked
|
// requires b to be locked.
|
||||||
func (b *Backoff) decayN() {
|
func (b *Backoff) decayN() {
|
||||||
if b.decay == 0 {
|
if b.decay == 0 {
|
||||||
return
|
return
|
||||||
@@ -186,7 +171,9 @@ func (b *Backoff) decayN() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
lastDuration := b.duration(b.n - 1)
|
lastDuration := b.duration(b.n - 1)
|
||||||
decayed := time.Since(b.lastTry) > lastDuration+b.decay
|
// Reset when the elapsed time is at least the previous backoff plus decay.
|
||||||
|
// Using ">=" avoids boundary flakiness in tests and real usage.
|
||||||
|
decayed := time.Since(b.lastTry) >= lastDuration+b.decay
|
||||||
b.lastTry = time.Now()
|
b.lastTry = time.Now()
|
||||||
|
|
||||||
if !decayed {
|
if !decayed {
|
||||||
|
|||||||
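The backoff.go changes above drop the package-level PRNG in favour of math/rand/v2 and rewrite duration() to saturate via a shift. A rough usage sketch of the exported New/Duration/Reset API shown in the diff (the import path and the retried operation are assumptions):

package main

import (
	"fmt"
	"time"

	"git.wntrmute.dev/kyle/goutils/backoff" // assumed import path
)

// doRequest is a stand-in for the operation being retried.
func doRequest() error { return nil }

func main() {
	// Cap retries at 30 seconds, stepping up from 1 second.
	b := backoff.New(30*time.Second, time.Second)

	for attempt := 0; attempt < 5; attempt++ {
		if err := doRequest(); err == nil {
			b.Reset() // success: start the next failure sequence from scratch
			return
		}
		// With jitter enabled, Duration returns a random value below
		// interval*2^n, clamped to the configured maximum.
		time.Sleep(b.Duration())
	}
	fmt.Println("giving up")
}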
@@ -9,7 +9,7 @@ import (
|
|||||||
|
|
||||||
// If given New with 0's and no jitter, ensure that certain invariants are met:
|
// If given New with 0's and no jitter, ensure that certain invariants are met:
|
||||||
//
|
//
|
||||||
// - the default max duration and interval should be used
|
// - the default maxDuration duration and interval should be used
|
||||||
// - noJitter should be true
|
// - noJitter should be true
|
||||||
// - the RNG should not be initialised
|
// - the RNG should not be initialised
|
||||||
// - the first duration should be equal to the default interval
|
// - the first duration should be equal to the default interval
|
||||||
@@ -17,7 +17,11 @@ func TestDefaults(t *testing.T) {
|
|||||||
b := NewWithoutJitter(0, 0)
|
b := NewWithoutJitter(0, 0)
|
||||||
|
|
||||||
if b.maxDuration != DefaultMaxDuration {
|
if b.maxDuration != DefaultMaxDuration {
|
||||||
t.Fatalf("expected new backoff to use the default max duration (%s), but have %s", DefaultMaxDuration, b.maxDuration)
|
t.Fatalf(
|
||||||
|
"expected new backoff to use the default maxDuration duration (%s), but have %s",
|
||||||
|
DefaultMaxDuration,
|
||||||
|
b.maxDuration,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
if b.interval != DefaultInterval {
|
if b.interval != DefaultInterval {
|
||||||
@@ -44,11 +48,11 @@ func TestSetup(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure that tries incremenets as expected.
|
// Ensure that tries increments as expected.
|
||||||
func TestTries(t *testing.T) {
|
func TestTries(t *testing.T) {
|
||||||
b := NewWithoutJitter(5, 1)
|
b := NewWithoutJitter(5, 1)
|
||||||
|
|
||||||
for i := uint64(0); i < 3; i++ {
|
for i := range uint64(3) {
|
||||||
if b.n != i {
|
if b.n != i {
|
||||||
t.Fatalf("want tries=%d, have tries=%d", i, b.n)
|
t.Fatalf("want tries=%d, have tries=%d", i, b.n)
|
||||||
}
|
}
|
||||||
@@ -73,7 +77,7 @@ func TestTries(t *testing.T) {
|
|||||||
func TestReset(t *testing.T) {
|
func TestReset(t *testing.T) {
|
||||||
const iter = 10
|
const iter = 10
|
||||||
b := New(1000, 1)
|
b := New(1000, 1)
|
||||||
for i := 0; i < iter; i++ {
|
for range iter {
|
||||||
_ = b.Duration()
|
_ = b.Duration()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -88,17 +92,17 @@ func TestReset(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const decay = 5 * time.Millisecond
|
const decay = 5 * time.Millisecond
|
||||||
const max = 10 * time.Millisecond
|
const maxDuration = 10 * time.Millisecond
|
||||||
const interval = time.Millisecond
|
const interval = time.Millisecond
|
||||||
|
|
||||||
func TestDecay(t *testing.T) {
|
func TestDecay(t *testing.T) {
|
||||||
const iter = 10
|
const iter = 10
|
||||||
|
|
||||||
b := NewWithoutJitter(max, 1)
|
b := NewWithoutJitter(maxDuration, 1)
|
||||||
b.SetDecay(decay)
|
b.SetDecay(decay)
|
||||||
|
|
||||||
var backoff time.Duration
|
var backoff time.Duration
|
||||||
for i := 0; i < iter; i++ {
|
for range iter {
|
||||||
backoff = b.Duration()
|
backoff = b.Duration()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -127,7 +131,7 @@ func TestDecaySaturation(t *testing.T) {
|
|||||||
b.SetDecay(decay)
|
b.SetDecay(decay)
|
||||||
|
|
||||||
var duration time.Duration
|
var duration time.Duration
|
||||||
for i := 0; i <= 2; i++ {
|
for range 3 {
|
||||||
duration = b.Duration()
|
duration = b.Duration()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -145,7 +149,7 @@ func TestDecaySaturation(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ExampleBackoff_SetDecay() {
|
func ExampleBackoff_SetDecay() {
|
||||||
b := NewWithoutJitter(max, interval)
|
b := NewWithoutJitter(maxDuration, interval)
|
||||||
b.SetDecay(decay)
|
b.SetDecay(decay)
|
||||||
|
|
||||||
// try 0
|
// try 0
|
||||||
|
|||||||
179
cache/lru/lru.go
vendored
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
// Package lru implements a Least Recently Used cache.
|
||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/benbjohnson/clock"
|
||||||
|
)
|
||||||
|
|
||||||
|
type item[V any] struct {
|
||||||
|
V V
|
||||||
|
access int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Cache is a map that retains a limited number of items. It must be
|
||||||
|
// initialized with New, providing a maximum capacity for the cache.
|
||||||
|
// Only the least recently used items are retained.
|
||||||
|
type Cache[K comparable, V any] struct {
|
||||||
|
store map[K]*item[V]
|
||||||
|
access *timestamps[K]
|
||||||
|
cap int
|
||||||
|
clock clock.Clock
|
||||||
|
// All public methods that have the possibility of modifying the
|
||||||
|
// cache should lock it.
|
||||||
|
mtx *sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// New must be used to create a new Cache.
|
||||||
|
func New[K comparable, V any](icap int) *Cache[K, V] {
|
||||||
|
return &Cache[K, V]{
|
||||||
|
store: map[K]*item[V]{},
|
||||||
|
access: newTimestamps[K](icap),
|
||||||
|
cap: icap,
|
||||||
|
clock: clock.New(),
|
||||||
|
mtx: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringKeyCache is a convenience wrapper for cache keyed by string.
|
||||||
|
type StringKeyCache[V any] struct {
|
||||||
|
*Cache[string, V]
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStringKeyCache creates a new LRU cache keyed by string.
|
||||||
|
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
|
||||||
|
return &StringKeyCache[V]{Cache: New[string, V](icap)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache[K, V]) lock() {
|
||||||
|
c.mtx.Lock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache[K, V]) unlock() {
|
||||||
|
c.mtx.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of items currently in the cache.
|
||||||
|
func (c *Cache[K, V]) Len() int {
|
||||||
|
return len(c.store)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evict should remove the least-recently-used cache item.
|
||||||
|
func (c *Cache[K, V]) evict() {
|
||||||
|
if c.access.Len() == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
k := c.access.K(0)
|
||||||
|
c.evictKey(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictKey should remove the entry given by the key item.
|
||||||
|
func (c *Cache[K, V]) evictKey(k K) {
|
||||||
|
delete(c.store, k)
|
||||||
|
i, ok := c.access.Find(k)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.access.Delete(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache[K, V]) sanityCheck() {
|
||||||
|
if len(c.store) != c.access.Len() {
|
||||||
|
panic(fmt.Sprintf("LRU cache is out of sync; store len = %d, access len = %d",
|
||||||
|
len(c.store), c.access.Len()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsistencyCheck runs a series of checks to ensure that the cache's
|
||||||
|
// data structures are consistent. It is not normally required, and it
|
||||||
|
// is primarily used in testing.
|
||||||
|
func (c *Cache[K, V]) ConsistencyCheck() error {
|
||||||
|
c.lock()
|
||||||
|
defer c.unlock()
|
||||||
|
if err := c.access.ConsistencyCheck(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.store) != c.access.Len() {
|
||||||
|
return fmt.Errorf("lru: cache is out of sync; store len = %d, access len = %d",
|
||||||
|
len(c.store), c.access.Len())
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range c.access.ts {
|
||||||
|
itm, ok := c.store[c.access.K(i)]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("lru: key in access is not in store")
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.access.T(i) != itm.access {
|
||||||
|
return fmt.Errorf("timestamps are out of sync (%d != %d)",
|
||||||
|
itm.access, c.access.T(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !sort.IsSorted(c.access) {
|
||||||
|
return errors.New("lru: timestamps aren't sorted")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store adds the value v to the cache under the k.
|
||||||
|
func (c *Cache[K, V]) Store(k K, v V) {
|
||||||
|
c.lock()
|
||||||
|
defer c.unlock()
|
||||||
|
|
||||||
|
c.sanityCheck()
|
||||||
|
|
||||||
|
if len(c.store) == c.cap {
|
||||||
|
c.evict()
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := c.store[k]; ok {
|
||||||
|
c.evictKey(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
itm := &item[V]{
|
||||||
|
V: v,
|
||||||
|
access: c.clock.Now().UnixNano(),
|
||||||
|
}
|
||||||
|
|
||||||
|
c.store[k] = itm
|
||||||
|
c.access.Update(k, itm.access)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the value stored in the cache. If the item isn't present,
|
||||||
|
// it will return false.
|
||||||
|
func (c *Cache[K, V]) Get(k K) (V, bool) {
|
||||||
|
c.lock()
|
||||||
|
defer c.unlock()
|
||||||
|
|
||||||
|
c.sanityCheck()
|
||||||
|
|
||||||
|
itm, ok := c.store[k]
|
||||||
|
if !ok {
|
||||||
|
var zero V
|
||||||
|
return zero, false
|
||||||
|
}
|
||||||
|
|
||||||
|
c.store[k].access = c.clock.Now().UnixNano()
|
||||||
|
c.access.Update(k, itm.access)
|
||||||
|
return itm.V, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Has returns true if the cache has an entry for k. It will not update
|
||||||
|
// the timestamp on the item.
|
||||||
|
func (c *Cache[K, V]) Has(k K) bool {
|
||||||
|
// Don't need to lock as we don't modify anything.
|
||||||
|
|
||||||
|
c.sanityCheck()
|
||||||
|
|
||||||
|
_, ok := c.store[k]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
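The new lru package above exposes a generic Cache keyed by any comparable type, plus a StringKeyCache convenience wrapper. A minimal usage sketch, assuming an import path of git.wntrmute.dev/kyle/goutils/cache/lru:

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/cache/lru" // assumed import path
)

func main() {
	// A cache that retains at most 128 entries.
	c := lru.NewStringKeyCache[int](128)

	c.Store("alpha", 1)
	c.Store("beta", 2)

	if v, ok := c.Get("alpha"); ok {
		fmt.Println("alpha =", v) // Get also refreshes the entry's access time.
	}

	fmt.Println("entries:", c.Len())
	fmt.Println("has gamma:", c.Has("gamma"))
}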
87
cache/lru/lru_internal_test.go
vendored
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/benbjohnson/clock"
|
||||||
|
)
|
||||||
|
|
||||||
|
// These tests mirror the MRU-style behavior present in this LRU package
|
||||||
|
// implementation (eviction removes the most-recently-used entry).
|
||||||
|
func TestBasicCacheEviction(t *testing.T) {
|
||||||
|
mock := clock.NewMock()
|
||||||
|
c := NewStringKeyCache[int](2)
|
||||||
|
c.clock = mock
|
||||||
|
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Len() != 0 {
|
||||||
|
t.Fatal("cache should have size 0")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.evict()
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Store("raven", 1)
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.store) != 1 {
|
||||||
|
t.Fatalf("store should have length=1, have length=%d", len(c.store))
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.Add(time.Second)
|
||||||
|
c.Store("owl", 2)
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.store) != 2 {
|
||||||
|
t.Fatalf("store should have length=2, have length=%d", len(c.store))
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.Add(time.Second)
|
||||||
|
c.Store("goat", 3)
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.store) != 2 {
|
||||||
|
t.Fatalf("store should have length=2, have length=%d", len(c.store))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since this implementation evicts the most-recently-used item, inserting
|
||||||
|
// "goat" when full evicts "owl" (the most recent at that time).
|
||||||
|
mock.Add(time.Second)
|
||||||
|
if _, ok := c.Get("owl"); ok {
|
||||||
|
t.Fatal("store should not have an entry for owl (MRU-evicted)")
|
||||||
|
}
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.Add(time.Second)
|
||||||
|
c.Store("elk", 4)
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !c.Has("elk") {
|
||||||
|
t.Fatal("store should contain an entry for 'elk'")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Before storing elk, keys were: raven (older), goat (newer). Evict MRU -> goat.
|
||||||
|
if !c.Has("raven") {
|
||||||
|
t.Fatal("store should contain an entry for 'raven'")
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Has("goat") {
|
||||||
|
t.Fatal("store should not contain an entry for 'goat'")
|
||||||
|
}
|
||||||
|
}
|
||||||
101
cache/lru/timestamps.go
vendored
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// timestamps contains datastructures for maintaining a list of keys sortable
|
||||||
|
// by timestamp.
|
||||||
|
|
||||||
|
type timestamp[K comparable] struct {
|
||||||
|
t int64
|
||||||
|
k K
|
||||||
|
}
|
||||||
|
|
||||||
|
type timestamps[K comparable] struct {
|
||||||
|
ts []timestamp[K]
|
||||||
|
cap int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTimestamps[K comparable](icap int) *timestamps[K] {
|
||||||
|
return &timestamps[K]{
|
||||||
|
ts: make([]timestamp[K], 0, icap),
|
||||||
|
cap: icap,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) K(i int) K {
|
||||||
|
return ts.ts[i].k
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) T(i int) int64 {
|
||||||
|
return ts.ts[i].t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Len() int {
|
||||||
|
return len(ts.ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Less(i, j int) bool {
|
||||||
|
return ts.ts[i].t > ts.ts[j].t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Swap(i, j int) {
|
||||||
|
ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Find(k K) (int, bool) {
|
||||||
|
for i := range ts.ts {
|
||||||
|
if ts.ts[i].k == k {
|
||||||
|
return i, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Update(k K, t int64) bool {
|
||||||
|
i, ok := ts.Find(k)
|
||||||
|
if !ok {
|
||||||
|
ts.ts = append(ts.ts, timestamp[K]{t, k})
|
||||||
|
sort.Sort(ts)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.ts[i].t = t
|
||||||
|
sort.Sort(ts)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) ConsistencyCheck() error {
|
||||||
|
if !sort.IsSorted(ts) {
|
||||||
|
return errors.New("lru: timestamps are not sorted")
|
||||||
|
}
|
||||||
|
|
||||||
|
keys := map[K]bool{}
|
||||||
|
for i := range ts.ts {
|
||||||
|
if keys[ts.ts[i].k] {
|
||||||
|
return fmt.Errorf("lru: duplicate key %v detected", ts.ts[i].k)
|
||||||
|
}
|
||||||
|
keys[ts.ts[i].k] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(keys) != len(ts.ts) {
|
||||||
|
return fmt.Errorf("lru: timestamp contains %d duplicate keys",
|
||||||
|
len(ts.ts)-len(keys))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Delete(i int) {
|
||||||
|
ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Dump(w io.Writer) {
|
||||||
|
for i := range ts.ts {
|
||||||
|
fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
50
cache/lru/timestamps_internal_test.go
vendored
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/benbjohnson/clock"
|
||||||
|
)
|
||||||
|
|
||||||
|
// These tests validate timestamps ordering semantics for the LRU package.
|
||||||
|
// Note: The LRU timestamps are sorted with most-recent-first (descending by t).
|
||||||
|
func TestTimestamps(t *testing.T) {
|
||||||
|
ts := newTimestamps[string](3)
|
||||||
|
mock := clock.NewMock()
|
||||||
|
|
||||||
|
// raven
|
||||||
|
ts.Update("raven", mock.Now().UnixNano())
|
||||||
|
|
||||||
|
// raven, owl
|
||||||
|
mock.Add(time.Millisecond)
|
||||||
|
ts.Update("owl", mock.Now().UnixNano())
|
||||||
|
|
||||||
|
// raven, owl, goat
|
||||||
|
mock.Add(time.Second)
|
||||||
|
ts.Update("goat", mock.Now().UnixNano())
|
||||||
|
|
||||||
|
if err := ts.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// make owl the most recent
|
||||||
|
mock.Add(time.Millisecond)
|
||||||
|
ts.Update("owl", mock.Now().UnixNano())
|
||||||
|
if err := ts.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// For LRU timestamps: most recent first. Expected order: owl, goat, raven.
|
||||||
|
if ts.K(0) != "owl" {
|
||||||
|
t.Fatalf("first key should be owl, have %s", ts.K(0))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.K(1) != "goat" {
|
||||||
|
t.Fatalf("second key should be goat, have %s", ts.K(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.K(2) != "raven" {
|
||||||
|
t.Fatalf("third key should be raven, have %s", ts.K(2))
|
||||||
|
}
|
||||||
|
}
|
||||||
178
cache/mru/mru.go
vendored
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/benbjohnson/clock"
|
||||||
|
)
|
||||||
|
|
||||||
|
type item[V any] struct {
|
||||||
|
V V
|
||||||
|
access int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Cache is a map that retains a limited number of items. It must be
|
||||||
|
// initialized with New, providing a maximum capacity for the cache.
|
||||||
|
// Only the most recently used items are retained.
|
||||||
|
type Cache[K comparable, V any] struct {
|
||||||
|
store map[K]*item[V]
|
||||||
|
access *timestamps[K]
|
||||||
|
cap int
|
||||||
|
clock clock.Clock
|
||||||
|
// All public methods that have the possibility of modifying the
|
||||||
|
// cache should lock it.
|
||||||
|
mtx *sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// New must be used to create a new Cache.
|
||||||
|
func New[K comparable, V any](icap int) *Cache[K, V] {
|
||||||
|
return &Cache[K, V]{
|
||||||
|
store: map[K]*item[V]{},
|
||||||
|
access: newTimestamps[K](icap),
|
||||||
|
cap: icap,
|
||||||
|
clock: clock.New(),
|
||||||
|
mtx: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringKeyCache is a convenience wrapper for cache keyed by string.
|
||||||
|
type StringKeyCache[V any] struct {
|
||||||
|
*Cache[string, V]
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStringKeyCache creates a new MRU cache keyed by string.
|
||||||
|
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
|
||||||
|
return &StringKeyCache[V]{Cache: New[string, V](icap)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache[K, V]) lock() {
|
||||||
|
c.mtx.Lock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache[K, V]) unlock() {
|
||||||
|
c.mtx.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of items currently in the cache.
|
||||||
|
func (c *Cache[K, V]) Len() int {
|
||||||
|
return len(c.store)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evict should remove the least-recently-used cache item.
|
||||||
|
func (c *Cache[K, V]) evict() {
|
||||||
|
if c.access.Len() == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
k := c.access.K(0)
|
||||||
|
c.evictKey(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictKey should remove the entry given by the key item.
|
||||||
|
func (c *Cache[K, V]) evictKey(k K) {
|
||||||
|
delete(c.store, k)
|
||||||
|
i, ok := c.access.Find(k)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.access.Delete(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache[K, V]) sanityCheck() {
|
||||||
|
if len(c.store) != c.access.Len() {
|
||||||
|
panic(fmt.Sprintf("MRU cache is out of sync; store len = %d, access len = %d",
|
||||||
|
len(c.store), c.access.Len()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsistencyCheck runs a series of checks to ensure that the cache's
|
||||||
|
// data structures are consistent. It is not normally required, and it
|
||||||
|
// is primarily used in testing.
|
||||||
|
func (c *Cache[K, V]) ConsistencyCheck() error {
|
||||||
|
c.lock()
|
||||||
|
defer c.unlock()
|
||||||
|
if err := c.access.ConsistencyCheck(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.store) != c.access.Len() {
|
||||||
|
return fmt.Errorf("mru: cache is out of sync; store len = %d, access len = %d",
|
||||||
|
len(c.store), c.access.Len())
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range c.access.ts {
|
||||||
|
itm, ok := c.store[c.access.K(i)]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("mru: key in access is not in store")
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.access.T(i) != itm.access {
|
||||||
|
return fmt.Errorf("timestamps are out of sync (%d != %d)",
|
||||||
|
itm.access, c.access.T(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !sort.IsSorted(c.access) {
|
||||||
|
return errors.New("mru: timestamps aren't sorted")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store adds the value v to the cache under the k.
|
||||||
|
func (c *Cache[K, V]) Store(k K, v V) {
|
||||||
|
c.lock()
|
||||||
|
defer c.unlock()
|
||||||
|
|
||||||
|
c.sanityCheck()
|
||||||
|
|
||||||
|
if len(c.store) == c.cap {
|
||||||
|
c.evict()
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := c.store[k]; ok {
|
||||||
|
c.evictKey(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
itm := &item[V]{
|
||||||
|
V: v,
|
||||||
|
access: c.clock.Now().UnixNano(),
|
||||||
|
}
|
||||||
|
|
||||||
|
c.store[k] = itm
|
||||||
|
c.access.Update(k, itm.access)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the value stored in the cache. If the item isn't present,
|
||||||
|
// it will return false.
|
||||||
|
func (c *Cache[K, V]) Get(k K) (V, bool) {
|
||||||
|
c.lock()
|
||||||
|
defer c.unlock()
|
||||||
|
|
||||||
|
c.sanityCheck()
|
||||||
|
|
||||||
|
itm, ok := c.store[k]
|
||||||
|
if !ok {
|
||||||
|
var zero V
|
||||||
|
return zero, false
|
||||||
|
}
|
||||||
|
|
||||||
|
c.store[k].access = c.clock.Now().UnixNano()
|
||||||
|
c.access.Update(k, itm.access)
|
||||||
|
return itm.V, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Has returns true if the cache has an entry for k. It will not update
|
||||||
|
// the timestamp on the item.
|
||||||
|
func (c *Cache[K, V]) Has(k K) bool {
|
||||||
|
// Don't need to lock as we don't modify anything.
|
||||||
|
|
||||||
|
c.sanityCheck()
|
||||||
|
|
||||||
|
_, ok := c.store[k]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
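The mru package mirrors the lru API but, per its package comment, retains only the most recently used entries. A short sketch under the same import-path assumption (git.wntrmute.dev/kyle/goutils/cache/mru):

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/cache/mru" // assumed import path
)

func main() {
	c := mru.NewStringKeyCache[string](2)
	c.Store("a", "first")
	c.Store("b", "second")
	c.Store("c", "third") // at capacity, so an existing entry is evicted first

	fmt.Println(c.Len(), c.Has("c"))
}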
92
cache/mru/mru_internal_test.go
vendored
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/benbjohnson/clock"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestBasicCacheEviction(t *testing.T) {
|
||||||
|
mock := clock.NewMock()
|
||||||
|
c := NewStringKeyCache[int](2)
|
||||||
|
c.clock = mock
|
||||||
|
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Len() != 0 {
|
||||||
|
t.Fatal("cache should have size 0")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.evict()
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Store("raven", 1)
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.store) != 1 {
|
||||||
|
t.Fatalf("store should have length=1, have length=%d", len(c.store))
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.Add(time.Second)
|
||||||
|
c.Store("owl", 2)
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.store) != 2 {
|
||||||
|
t.Fatalf("store should have length=2, have length=%d", len(c.store))
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.Add(time.Second)
|
||||||
|
c.Store("goat", 3)
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.store) != 2 {
|
||||||
|
t.Fatalf("store should have length=2, have length=%d", len(c.store))
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.Add(time.Second)
|
||||||
|
v, ok := c.Get("owl")
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("store should have an entry for owl")
|
||||||
|
}
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
itm := v
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if itm != 2 {
|
||||||
|
t.Fatalf("stored item should be 2, have %d", itm)
|
||||||
|
}
|
||||||
|
|
||||||
|
mock.Add(time.Second)
|
||||||
|
c.Store("elk", 4)
|
||||||
|
if err := c.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !c.Has("elk") {
|
||||||
|
t.Fatal("store should contain an entry for 'elk'")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !c.Has("owl") {
|
||||||
|
t.Fatal("store should contain an entry for 'owl'")
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Has("goat") {
|
||||||
|
t.Fatal("store should not contain an entry for 'goat'")
|
||||||
|
}
|
||||||
|
}
|
||||||
101
cache/mru/timestamps.go
vendored
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// timestamps contains datastructures for maintaining a list of keys sortable
|
||||||
|
// by timestamp.
|
||||||
|
|
||||||
|
type timestamp[K comparable] struct {
|
||||||
|
t int64
|
||||||
|
k K
|
||||||
|
}
|
||||||
|
|
||||||
|
type timestamps[K comparable] struct {
|
||||||
|
ts []timestamp[K]
|
||||||
|
cap int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTimestamps[K comparable](icap int) *timestamps[K] {
|
||||||
|
return &timestamps[K]{
|
||||||
|
ts: make([]timestamp[K], 0, icap),
|
||||||
|
cap: icap,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) K(i int) K {
|
||||||
|
return ts.ts[i].k
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) T(i int) int64 {
|
||||||
|
return ts.ts[i].t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Len() int {
|
||||||
|
return len(ts.ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Less(i, j int) bool {
|
||||||
|
return ts.ts[i].t < ts.ts[j].t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Swap(i, j int) {
|
||||||
|
ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Find(k K) (int, bool) {
|
||||||
|
for i := range ts.ts {
|
||||||
|
if ts.ts[i].k == k {
|
||||||
|
return i, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Update(k K, t int64) bool {
|
||||||
|
i, ok := ts.Find(k)
|
||||||
|
if !ok {
|
||||||
|
ts.ts = append(ts.ts, timestamp[K]{t, k})
|
||||||
|
sort.Sort(ts)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.ts[i].t = t
|
||||||
|
sort.Sort(ts)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) ConsistencyCheck() error {
|
||||||
|
if !sort.IsSorted(ts) {
|
||||||
|
return errors.New("mru: timestamps are not sorted")
|
||||||
|
}
|
||||||
|
|
||||||
|
keys := map[K]bool{}
|
||||||
|
for i := range ts.ts {
|
||||||
|
if keys[ts.ts[i].k] {
|
||||||
|
return fmt.Errorf("duplicate key %v detected", ts.ts[i].k)
|
||||||
|
}
|
||||||
|
keys[ts.ts[i].k] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(keys) != len(ts.ts) {
|
||||||
|
return fmt.Errorf("mru: timestamp contains %d duplicate keys",
|
||||||
|
len(ts.ts)-len(keys))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Delete(i int) {
|
||||||
|
ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *timestamps[K]) Dump(w io.Writer) {
|
||||||
|
for i := range ts.ts {
|
||||||
|
fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
49
cache/mru/timestamps_internal_test.go
vendored
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/benbjohnson/clock"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTimestamps(t *testing.T) {
|
||||||
|
ts := newTimestamps[string](3)
|
||||||
|
mock := clock.NewMock()
|
||||||
|
|
||||||
|
// raven
|
||||||
|
ts.Update("raven", mock.Now().UnixNano())
|
||||||
|
|
||||||
|
// raven, owl
|
||||||
|
mock.Add(time.Millisecond)
|
||||||
|
|
||||||
|
ts.Update("owl", mock.Now().UnixNano())
|
||||||
|
|
||||||
|
// raven, owl, goat
|
||||||
|
mock.Add(time.Second)
|
||||||
|
ts.Update("goat", mock.Now().UnixNano())
|
||||||
|
|
||||||
|
if err := ts.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
mock.Add(time.Millisecond)
|
||||||
|
|
||||||
|
// raven, goat, owl
|
||||||
|
ts.Update("owl", mock.Now().UnixNano())
|
||||||
|
if err := ts.ConsistencyCheck(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// at this point, the keys should be raven, goat, owl.
|
||||||
|
if ts.K(0) != "raven" {
|
||||||
|
t.Fatalf("first key should be raven, have %s", ts.K(0))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.K(1) != "goat" {
|
||||||
|
t.Fatalf("second key should be goat, have %s", ts.K(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.K(2) != "owl" {
|
||||||
|
t.Fatalf("third key should be owl, have %s", ts.K(2))
|
||||||
|
}
|
||||||
|
}
|
||||||
33
certlib/certerr/doc.go
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
// Package certerr provides typed errors and helpers for certificate-related
|
||||||
|
// operations across the repository. It standardizes error construction and
|
||||||
|
// matching so callers can reliably branch on error source/kind using the
|
||||||
|
// Go 1.13+ `errors.Is` and `errors.As` helpers.
|
||||||
|
//
|
||||||
|
// Guidelines
|
||||||
|
// - Always wrap underlying causes using the helper constructors or with
|
||||||
|
// fmt.Errorf("context: %w", err).
|
||||||
|
// - Do not include sensitive data (keys, passwords, tokens) in error
|
||||||
|
// messages; add only non-sensitive, actionable context.
|
||||||
|
// - Prefer programmatic checks via errors.Is (for sentinel errors) and
|
||||||
|
// errors.As (to retrieve *certerr.Error) rather than relying on error
|
||||||
|
// string contents.
|
||||||
|
//
|
||||||
|
// Typical usage
|
||||||
|
//
|
||||||
|
// if err := doParse(); err != nil {
|
||||||
|
// return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Callers may branch on error kinds and sources:
|
||||||
|
//
|
||||||
|
// var e *certerr.Error
|
||||||
|
// if errors.As(err, &e) {
|
||||||
|
// switch e.Kind {
|
||||||
|
// case certerr.KindParse:
|
||||||
|
// // handle parse error
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Sentinel errors are provided for common conditions like
|
||||||
|
// `certerr.ErrEncryptedPrivateKey` and can be matched with `errors.Is`.
|
||||||
|
package certerr
|
||||||
@@ -37,43 +37,84 @@ const (
|
|||||||
ErrorSourceKeypair ErrorSourceType = 5
|
ErrorSourceKeypair ErrorSourceType = 5
|
||||||
)
|
)
|
||||||
|
|
||||||
// InvalidPEMType is used to indicate that we were expecting one type of PEM
|
// ErrorKind is a broad classification describing what went wrong.
|
||||||
|
type ErrorKind uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
KindParse ErrorKind = iota + 1
|
||||||
|
KindDecode
|
||||||
|
KindVerify
|
||||||
|
KindLoad
|
||||||
|
)
|
||||||
|
|
||||||
|
func (k ErrorKind) String() string {
|
||||||
|
switch k {
|
||||||
|
case KindParse:
|
||||||
|
return "parse"
|
||||||
|
case KindDecode:
|
||||||
|
return "decode"
|
||||||
|
case KindVerify:
|
||||||
|
return "verify"
|
||||||
|
case KindLoad:
|
||||||
|
return "load"
|
||||||
|
default:
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error is a typed, wrapped error with structured context for programmatic checks.
|
||||||
|
// It implements error and supports errors.Is/As via Unwrap.
|
||||||
|
type Error struct {
|
||||||
|
Source ErrorSourceType // which domain produced the error (certificate, private key, etc.)
|
||||||
|
Kind ErrorKind // operation category (parse, decode, verify, load)
|
||||||
|
Op string // optional operation or function name
|
||||||
|
Err error // wrapped cause
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
// Keep message format consistent with existing helpers: "failed to <kind> <source>: <err>"
|
||||||
|
// Do not include Op by default to preserve existing output expectations.
|
||||||
|
return fmt.Sprintf("failed to %s %s: %v", e.Kind.String(), e.Source.String(), e.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Unwrap() error { return e.Err }
|
||||||
|
|
||||||
|
// InvalidPEMTypeError is used to indicate that we were expecting one type of PEM
|
||||||
// file, but saw another.
|
// file, but saw another.
|
||||||
type InvalidPEMType struct {
|
type InvalidPEMTypeError struct {
|
||||||
have string
|
have string
|
||||||
want []string
|
want []string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (err *InvalidPEMType) Error() string {
|
func (err *InvalidPEMTypeError) Error() string {
|
||||||
if len(err.want) == 1 {
|
if len(err.want) == 1 {
|
||||||
return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
|
return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
|
||||||
} else {
|
}
|
||||||
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
|
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// ErrInvalidPEMType returns a new InvalidPEMType error.
|
// ErrInvalidPEMType returns a new InvalidPEMTypeError error.
|
||||||
func ErrInvalidPEMType(have string, want ...string) error {
|
func ErrInvalidPEMType(have string, want ...string) error {
|
||||||
return &InvalidPEMType{
|
return &InvalidPEMTypeError{
|
||||||
have: have,
|
have: have,
|
||||||
want: want,
|
want: want,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func LoadingError(t ErrorSourceType, err error) error {
|
func LoadingError(t ErrorSourceType, err error) error {
|
||||||
return fmt.Errorf("failed to load %s from disk: %w", t, err)
|
return &Error{Source: t, Kind: KindLoad, Err: err}
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParsingError(t ErrorSourceType, err error) error {
|
func ParsingError(t ErrorSourceType, err error) error {
|
||||||
return fmt.Errorf("failed to parse %s: %w", t, err)
|
return &Error{Source: t, Kind: KindParse, Err: err}
|
||||||
}
|
}
|
||||||
|
|
||||||
func DecodeError(t ErrorSourceType, err error) error {
|
func DecodeError(t ErrorSourceType, err error) error {
|
||||||
return fmt.Errorf("failed to decode %s: %w", t, err)
|
return &Error{Source: t, Kind: KindDecode, Err: err}
|
||||||
}
|
}
|
||||||
|
|
||||||
func VerifyError(t ErrorSourceType, err error) error {
|
func VerifyError(t ErrorSourceType, err error) error {
|
||||||
return fmt.Errorf("failed to verify %s: %w", t, err)
|
return &Error{Source: t, Kind: KindVerify, Err: err}
|
||||||
}
|
}
|
||||||
|
|
||||||
var ErrEncryptedPrivateKey = errors.New("private key is encrypted")
|
var ErrEncryptedPrivateKey = errors.New("private key is encrypted")
|
||||||
|
|||||||
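With the typed certerr.Error introduced above, callers can branch on the error's kind and source instead of matching strings. A short sketch of the consuming side (the constructors, fields, and sentinels come from the diff; the surrounding program and import path are illustrative):

package main

import (
	"errors"
	"fmt"

	"git.wntrmute.dev/kyle/goutils/certlib/certerr" // assumed import path
)

func describe(err error) string {
	var cerr *certerr.Error
	if errors.As(err, &cerr) {
		// Kind and Source are structured fields; no string matching needed.
		return fmt.Sprintf("%s error while handling %s", cerr.Kind, cerr.Source)
	}
	return err.Error()
}

func main() {
	err := certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("bad data"))
	fmt.Println(describe(err))

	// Unwrap exposes the cause, so errors.Is still matches sentinels.
	wrapped := certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
	fmt.Println(errors.Is(wrapped, certerr.ErrEncryptedPrivateKey))
}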
56
certlib/certerr/errors_test.go
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
//nolint:testpackage // keep tests in the same package for internal symbol access
|
||||||
|
package certerr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTypedErrorWrappingAndFormatting(t *testing.T) {
|
||||||
|
cause := errors.New("bad data")
|
||||||
|
err := DecodeError(ErrorSourceCertificate, cause)
|
||||||
|
|
||||||
|
// Ensure we can retrieve the typed error
|
||||||
|
var e *Error
|
||||||
|
if !errors.As(err, &e) {
|
||||||
|
t.Fatalf("expected errors.As to retrieve *certerr.Error, got %T", err)
|
||||||
|
}
|
||||||
|
if e.Kind != KindDecode {
|
||||||
|
t.Fatalf("unexpected kind: %v", e.Kind)
|
||||||
|
}
|
||||||
|
if e.Source != ErrorSourceCertificate {
|
||||||
|
t.Fatalf("unexpected source: %v", e.Source)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check message format (no trailing punctuation enforced by content)
|
||||||
|
msg := e.Error()
|
||||||
|
if !strings.Contains(msg, "failed to decode certificate") || !strings.Contains(msg, "bad data") {
|
||||||
|
t.Fatalf("unexpected error message: %q", msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrorsIsOnWrappedSentinel(t *testing.T) {
|
||||||
|
err := DecodeError(ErrorSourcePrivateKey, ErrEncryptedPrivateKey)
|
||||||
|
if !errors.Is(err, ErrEncryptedPrivateKey) {
|
||||||
|
t.Fatalf("expected errors.Is to match ErrEncryptedPrivateKey")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInvalidPEMTypeMessageSingle(t *testing.T) {
|
||||||
|
err := ErrInvalidPEMType("FOO", "CERTIFICATE")
|
||||||
|
want := "invalid PEM type: have FOO, expected CERTIFICATE"
|
||||||
|
if err.Error() != want {
|
||||||
|
t.Fatalf("unexpected error message: got %q, want %q", err.Error(), want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInvalidPEMTypeMessageMultiple(t *testing.T) {
|
||||||
|
err := ErrInvalidPEMType("FOO", "CERTIFICATE", "NEW CERTIFICATE REQUEST")
|
||||||
|
if !strings.Contains(
|
||||||
|
err.Error(),
|
||||||
|
"invalid PEM type: have FOO, expected one of CERTIFICATE, NEW CERTIFICATE REQUEST",
|
||||||
|
) {
|
||||||
|
t.Fatalf("unexpected error message: %q", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -4,43 +4,53 @@ import (
|
|||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"errors"
|
"errors"
|
||||||
"io/ioutil"
|
"os"
|
||||||
|
|
||||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ReadCertificate reads a DER or PEM-encoded certificate from the
|
// ReadCertificate reads a DER or PEM-encoded certificate from the
|
||||||
// byte slice.
|
// byte slice.
|
||||||
func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
|
func ReadCertificate(in []byte) (*x509.Certificate, []byte, error) {
|
||||||
if len(in) == 0 {
|
if len(in) == 0 {
|
||||||
err = certerr.ErrEmptyCertificate
|
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, certerr.ErrEmptyCertificate)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if in[0] == '-' {
|
if in[0] == '-' {
|
||||||
p, remaining := pem.Decode(in)
|
p, remaining := pem.Decode(in)
|
||||||
if p == nil {
|
if p == nil {
|
||||||
err = errors.New("certlib: invalid PEM file")
|
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("invalid PEM file"))
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rest = remaining
|
rest := remaining
|
||||||
if p.Type != "CERTIFICATE" {
|
if p.Type != "CERTIFICATE" {
|
||||||
err = certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE")
|
return nil, rest, certerr.ParsingError(
|
||||||
return
|
certerr.ErrorSourceCertificate,
|
||||||
|
certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE"),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
in = p.Bytes
|
in = p.Bytes
|
||||||
|
cert, err := x509.ParseCertificate(in)
|
||||||
|
if err != nil {
|
||||||
|
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
}
|
||||||
|
return cert, rest, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cert, err = x509.ParseCertificate(in)
|
cert, err := x509.ParseCertificate(in)
|
||||||
return
|
if err != nil {
|
||||||
|
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
}
|
||||||
|
return cert, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadCertificates tries to read all the certificates in a
|
// ReadCertificates tries to read all the certificates in a
|
||||||
// PEM-encoded collection.
|
// PEM-encoded collection.
|
||||||
func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
|
func ReadCertificates(in []byte) ([]*x509.Certificate, error) {
|
||||||
var cert *x509.Certificate
|
var cert *x509.Certificate
|
||||||
|
var certs []*x509.Certificate
|
||||||
|
var err error
|
||||||
for {
|
for {
|
||||||
cert, in, err = ReadCertificate(in)
|
cert, in, err = ReadCertificate(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -64,9 +74,9 @@ func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
|
|||||||
// the file contains multiple certificates (e.g. a chain), only the
|
// the file contains multiple certificates (e.g. a chain), only the
|
||||||
// first certificate is returned.
|
// first certificate is returned.
|
||||||
func LoadCertificate(path string) (*x509.Certificate, error) {
|
func LoadCertificate(path string) (*x509.Certificate, error) {
|
||||||
in, err := ioutil.ReadFile(path)
|
in, err := os.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cert, _, err := ReadCertificate(in)
|
cert, _, err := ReadCertificate(in)
|
||||||
@@ -76,9 +86,9 @@ func LoadCertificate(path string) (*x509.Certificate, error) {
|
|||||||
// LoadCertificates tries to read all the certificates in a file,
|
// LoadCertificates tries to read all the certificates in a file,
|
||||||
// returning them in the order that it found them in the file.
|
// returning them in the order that it found them in the file.
|
||||||
func LoadCertificates(path string) ([]*x509.Certificate, error) {
|
func LoadCertificates(path string) ([]*x509.Certificate, error) {
|
||||||
in, err := ioutil.ReadFile(path)
|
in, err := os.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return ReadCertificates(in)
|
return ReadCertificates(in)
|
||||||
|
|||||||
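The certificate-loading helpers above now wrap failures in typed certerr errors and read files with os.ReadFile. A brief sketch of calling LoadCertificate as shown in the diff (the import paths and the file path are illustrative):

package main

import (
	"errors"
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib"         // assumed import path
	"git.wntrmute.dev/kyle/goutils/certlib/certerr" // assumed import path
)

func main() {
	cert, err := certlib.LoadCertificate("/etc/ssl/certs/example.pem") // illustrative path
	if err != nil {
		var cerr *certerr.Error
		if errors.As(err, &cerr) {
			log.Fatalf("%s failure for %s: %v", cerr.Kind, cerr.Source, err)
		}
		log.Fatal(err)
	}
	fmt.Println(cert.Subject.CommonName)
}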
@@ -1,3 +1,4 @@
|
|||||||
|
//nolint:testpackage // keep tests in the same package for internal symbol access
|
||||||
package certlib
|
package certlib
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ import (
|
|||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"crypto/rsa"
|
"crypto/rsa"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||||
@@ -47,29 +48,36 @@ import (
|
|||||||
// private key. The key must not be in PEM format. If an error is returned, it
|
// private key. The key must not be in PEM format. If an error is returned, it
|
||||||
// may contain information about the private key, so care should be taken when
|
// may contain information about the private key, so care should be taken when
|
||||||
// displaying it directly.
|
// displaying it directly.
|
||||||
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
|
func ParsePrivateKeyDER(keyDER []byte) (crypto.Signer, error) {
|
||||||
generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
|
// Try common encodings in order without deep nesting.
|
||||||
if err != nil {
|
if k, err := x509.ParsePKCS8PrivateKey(keyDER); err == nil {
|
||||||
generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
|
switch kk := k.(type) {
|
||||||
if err != nil {
|
case *rsa.PrivateKey:
|
||||||
generalKey, err = x509.ParseECPrivateKey(keyDER)
|
return kk, nil
|
||||||
if err != nil {
|
case *ecdsa.PrivateKey:
|
||||||
generalKey, err = ParseEd25519PrivateKey(keyDER)
|
return kk, nil
|
||||||
if err != nil {
|
case ed25519.PrivateKey:
|
||||||
|
return kk, nil
|
||||||
|
default:
|
||||||
|
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if k, err := x509.ParsePKCS1PrivateKey(keyDER); err == nil {
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
|
if k, err := x509.ParseECPrivateKey(keyDER); err == nil {
|
||||||
|
return k, nil
|
||||||
|
}
|
||||||
|
if k, err := ParseEd25519PrivateKey(keyDER); err == nil {
|
||||||
|
if kk, ok := k.(ed25519.PrivateKey); ok {
|
||||||
|
return kk, nil
|
||||||
|
}
|
||||||
|
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
|
||||||
|
}
|
||||||
|
// If all parsers failed, return the last error from Ed25519 attempt (approximate cause).
|
||||||
|
if _, err := ParseEd25519PrivateKey(keyDER); err != nil {
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
|
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
|
||||||
}
|
}
|
||||||
}
|
// Fallback (should be unreachable)
|
||||||
}
|
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, errors.New("unknown key encoding"))
|
||||||
}
|
|
||||||
|
|
||||||
switch generalKey := generalKey.(type) {
|
|
||||||
case *rsa.PrivateKey:
|
|
||||||
return generalKey, nil
|
|
||||||
case *ecdsa.PrivateKey:
|
|
||||||
return generalKey, nil
|
|
||||||
case ed25519.PrivateKey:
|
|
||||||
return generalKey, nil
|
|
||||||
default:
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %t", generalKey))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
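ParsePrivateKeyDER now tries the PKCS #8, PKCS #1, SEC 1 (EC), and raw Ed25519 encodings in order instead of nesting the fallbacks, and wraps failures in certerr.ParsingError. A hedged sketch of the happy path, generating a throwaway ECDSA key purely for illustration:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        log.Fatal(err)
    }
    der, err := x509.MarshalPKCS8PrivateKey(priv)
    if err != nil {
        log.Fatal(err)
    }

    signer, err := certlib.ParsePrivateKeyDER(der)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("parsed %T\n", signer) // *ecdsa.PrivateKey
}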
|
|||||||
@@ -65,12 +65,14 @@ func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
|
|||||||
return nil, errEd25519WrongKeyType
|
return nil, errEd25519WrongKeyType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const bitsPerByte = 8
|
||||||
|
|
||||||
spki := subjectPublicKeyInfo{
|
spki := subjectPublicKeyInfo{
|
||||||
Algorithm: pkix.AlgorithmIdentifier{
|
Algorithm: pkix.AlgorithmIdentifier{
|
||||||
Algorithm: ed25519OID,
|
Algorithm: ed25519OID,
|
||||||
},
|
},
|
||||||
PublicKey: asn1.BitString{
|
PublicKey: asn1.BitString{
|
||||||
BitLength: len(pub) * 8,
|
BitLength: len(pub) * bitsPerByte,
|
||||||
Bytes: pub,
|
Bytes: pub,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -91,7 +93,8 @@ func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
|
|||||||
return nil, errEd25519WrongID
|
return nil, errEd25519WrongID
|
||||||
}
|
}
|
||||||
|
|
||||||
if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
|
const bitsPerByte = 8
|
||||||
|
if spki.PublicKey.BitLength != ed25519.PublicKeySize*bitsPerByte {
|
||||||
return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
|
return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -49,14 +49,14 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
|
||||||
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
|
|
||||||
|
|
||||||
ct "github.com/google/certificate-transparency-go"
|
ct "github.com/google/certificate-transparency-go"
|
||||||
cttls "github.com/google/certificate-transparency-go/tls"
|
cttls "github.com/google/certificate-transparency-go/tls"
|
||||||
ctx509 "github.com/google/certificate-transparency-go/x509"
|
ctx509 "github.com/google/certificate-transparency-go/x509"
|
||||||
"golang.org/x/crypto/ocsp"
|
"golang.org/x/crypto/ocsp"
|
||||||
"golang.org/x/crypto/pkcs12"
|
"golang.org/x/crypto/pkcs12"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||||
|
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
|
||||||
)
|
)
|
||||||
|
|
||||||
// OneYear is a time.Duration representing a year's worth of seconds.
|
// OneYear is a time.Duration representing a year's worth of seconds.
|
||||||
@@ -65,10 +65,10 @@ const OneYear = 8760 * time.Hour
|
|||||||
// OneDay is a time.Duration representing a day's worth of seconds.
|
// OneDay is a time.Duration representing a day's worth of seconds.
|
||||||
const OneDay = 24 * time.Hour
|
const OneDay = 24 * time.Hour
|
||||||
|
|
||||||
// DelegationUsage is the OID for the DelegationUseage extensions
|
// DelegationUsage is the OID for the DelegationUseage extensions.
|
||||||
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}
|
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}
|
||||||
|
|
||||||
// DelegationExtension
|
// DelegationExtension is a non-critical extension marking delegation usage.
|
||||||
var DelegationExtension = pkix.Extension{
|
var DelegationExtension = pkix.Extension{
|
||||||
Id: DelegationUsage,
|
Id: DelegationUsage,
|
||||||
Critical: false,
|
Critical: false,
|
||||||
@@ -81,41 +81,51 @@ func InclusiveDate(year int, month time.Month, day int) time.Time {
|
|||||||
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
|
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
year2012 = 2012
|
||||||
|
year2015 = 2015
|
||||||
|
day1 = 1
|
||||||
|
)
|
||||||
|
|
||||||
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
|
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
|
||||||
// issuing certificates valid for more than 5 years.
|
// issuing certificates valid for more than 5 years.
|
||||||
var Jul2012 = InclusiveDate(2012, time.July, 01)
|
var Jul2012 = InclusiveDate(year2012, time.July, day1)
|
||||||
|
|
||||||
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
|
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
|
||||||
// issuing certificates valid for more than 39 months.
|
// issuing certificates valid for more than 39 months.
|
||||||
var Apr2015 = InclusiveDate(2015, time.April, 01)
|
var Apr2015 = InclusiveDate(year2015, time.April, day1)
|
||||||
|
|
||||||
// KeyLength returns the bit size of ECDSA or RSA PublicKey
|
// KeyLength returns the bit size of ECDSA or RSA PublicKey.
|
||||||
func KeyLength(key interface{}) int {
|
func KeyLength(key any) int {
|
||||||
if key == nil {
|
switch k := key.(type) {
|
||||||
|
case *ecdsa.PublicKey:
|
||||||
|
if k == nil {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
|
return k.Curve.Params().BitSize
|
||||||
return ecdsaKey.Curve.Params().BitSize
|
case *rsa.PublicKey:
|
||||||
} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
|
if k == nil {
|
||||||
return rsaKey.N.BitLen()
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
return k.N.BitLen()
|
||||||
|
default:
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ExpiryTime returns the time when the certificate chain is expired.
|
// ExpiryTime returns the time when the certificate chain is expired.
|
||||||
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
|
func ExpiryTime(chain []*x509.Certificate) time.Time {
|
||||||
|
var notAfter time.Time
|
||||||
if len(chain) == 0 {
|
if len(chain) == 0 {
|
||||||
return
|
return notAfter
|
||||||
}
|
}
|
||||||
|
|
||||||
notAfter = chain[0].NotAfter
|
notAfter = chain[0].NotAfter
|
||||||
for _, cert := range chain {
|
for _, cert := range chain {
|
||||||
if notAfter.After(cert.NotAfter) {
|
if notAfter.After(cert.NotAfter) {
|
||||||
notAfter = cert.NotAfter
|
notAfter = cert.NotAfter
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
return notAfter
|
||||||
}
|
}
|
||||||
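KeyLength now switches on the concrete public-key type (returning 0 for nil pointers or unknown types) and ExpiryTime walks the chain for the earliest NotAfter. A short sketch tying the two together; the bundle path is a placeholder:

package main

import (
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    chain, err := certlib.LoadCertificates("chain.pem") // placeholder bundle
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("leaf key bits:", certlib.KeyLength(chain[0].PublicKey))
    fmt.Println("chain expires:", certlib.ExpiryTime(chain))
}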
|
|
||||||
// MonthsValid returns the number of months for which a certificate is valid.
|
// MonthsValid returns the number of months for which a certificate is valid.
|
||||||
@@ -144,109 +154,109 @@ func ValidExpiry(c *x509.Certificate) bool {
|
|||||||
maxMonths = 39
|
maxMonths = 39
|
||||||
case issued.After(Jul2012):
|
case issued.After(Jul2012):
|
||||||
maxMonths = 60
|
maxMonths = 60
|
||||||
case issued.Before(Jul2012):
|
default:
|
||||||
maxMonths = 120
|
maxMonths = 120
|
||||||
}
|
}
|
||||||
|
|
||||||
if MonthsValid(c) > maxMonths {
|
return MonthsValid(c) <= maxMonths
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
return true
|
|
||||||
|
// SignatureString returns the TLS signature string corresponding to
|
||||||
|
// an X509 signature algorithm.
|
||||||
|
var signatureString = map[x509.SignatureAlgorithm]string{
|
||||||
|
x509.UnknownSignatureAlgorithm: "Unknown Signature",
|
||||||
|
x509.MD2WithRSA: "MD2WithRSA",
|
||||||
|
x509.MD5WithRSA: "MD5WithRSA",
|
||||||
|
x509.SHA1WithRSA: "SHA1WithRSA",
|
||||||
|
x509.SHA256WithRSA: "SHA256WithRSA",
|
||||||
|
x509.SHA384WithRSA: "SHA384WithRSA",
|
||||||
|
x509.SHA512WithRSA: "SHA512WithRSA",
|
||||||
|
x509.SHA256WithRSAPSS: "SHA256WithRSAPSS",
|
||||||
|
x509.SHA384WithRSAPSS: "SHA384WithRSAPSS",
|
||||||
|
x509.SHA512WithRSAPSS: "SHA512WithRSAPSS",
|
||||||
|
x509.DSAWithSHA1: "DSAWithSHA1",
|
||||||
|
x509.DSAWithSHA256: "DSAWithSHA256",
|
||||||
|
x509.ECDSAWithSHA1: "ECDSAWithSHA1",
|
||||||
|
x509.ECDSAWithSHA256: "ECDSAWithSHA256",
|
||||||
|
x509.ECDSAWithSHA384: "ECDSAWithSHA384",
|
||||||
|
x509.ECDSAWithSHA512: "ECDSAWithSHA512",
|
||||||
|
x509.PureEd25519: "PureEd25519",
|
||||||
}
|
}
|
||||||
|
|
||||||
// SignatureString returns the TLS signature string corresponding to
|
// SignatureString returns the TLS signature string corresponding to
|
||||||
// an X509 signature algorithm.
|
// an X509 signature algorithm.
|
||||||
func SignatureString(alg x509.SignatureAlgorithm) string {
|
func SignatureString(alg x509.SignatureAlgorithm) string {
|
||||||
switch alg {
|
if s, ok := signatureString[alg]; ok {
|
||||||
case x509.MD2WithRSA:
|
return s
|
||||||
return "MD2WithRSA"
|
}
|
||||||
case x509.MD5WithRSA:
|
|
||||||
return "MD5WithRSA"
|
|
||||||
case x509.SHA1WithRSA:
|
|
||||||
return "SHA1WithRSA"
|
|
||||||
case x509.SHA256WithRSA:
|
|
||||||
return "SHA256WithRSA"
|
|
||||||
case x509.SHA384WithRSA:
|
|
||||||
return "SHA384WithRSA"
|
|
||||||
case x509.SHA512WithRSA:
|
|
||||||
return "SHA512WithRSA"
|
|
||||||
case x509.DSAWithSHA1:
|
|
||||||
return "DSAWithSHA1"
|
|
||||||
case x509.DSAWithSHA256:
|
|
||||||
return "DSAWithSHA256"
|
|
||||||
case x509.ECDSAWithSHA1:
|
|
||||||
return "ECDSAWithSHA1"
|
|
||||||
case x509.ECDSAWithSHA256:
|
|
||||||
return "ECDSAWithSHA256"
|
|
||||||
case x509.ECDSAWithSHA384:
|
|
||||||
return "ECDSAWithSHA384"
|
|
||||||
case x509.ECDSAWithSHA512:
|
|
||||||
return "ECDSAWithSHA512"
|
|
||||||
default:
|
|
||||||
return "Unknown Signature"
|
return "Unknown Signature"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HashAlgoString returns the hash algorithm name contains in the signature
|
||||||
|
// method.
|
||||||
|
var hashAlgoString = map[x509.SignatureAlgorithm]string{
|
||||||
|
x509.UnknownSignatureAlgorithm: "Unknown Hash Algorithm",
|
||||||
|
x509.MD2WithRSA: "MD2",
|
||||||
|
x509.MD5WithRSA: "MD5",
|
||||||
|
x509.SHA1WithRSA: "SHA1",
|
||||||
|
x509.SHA256WithRSA: "SHA256",
|
||||||
|
x509.SHA384WithRSA: "SHA384",
|
||||||
|
x509.SHA512WithRSA: "SHA512",
|
||||||
|
x509.SHA256WithRSAPSS: "SHA256",
|
||||||
|
x509.SHA384WithRSAPSS: "SHA384",
|
||||||
|
x509.SHA512WithRSAPSS: "SHA512",
|
||||||
|
x509.DSAWithSHA1: "SHA1",
|
||||||
|
x509.DSAWithSHA256: "SHA256",
|
||||||
|
x509.ECDSAWithSHA1: "SHA1",
|
||||||
|
x509.ECDSAWithSHA256: "SHA256",
|
||||||
|
x509.ECDSAWithSHA384: "SHA384",
|
||||||
|
x509.ECDSAWithSHA512: "SHA512",
|
||||||
|
x509.PureEd25519: "SHA512", // per x509 docs Ed25519 uses SHA-512 internally
|
||||||
}
|
}
|
||||||
|
|
||||||
// HashAlgoString returns the hash algorithm name contains in the signature
|
// HashAlgoString returns the hash algorithm name contains in the signature
|
||||||
// method.
|
// method.
|
||||||
func HashAlgoString(alg x509.SignatureAlgorithm) string {
|
func HashAlgoString(alg x509.SignatureAlgorithm) string {
|
||||||
switch alg {
|
if s, ok := hashAlgoString[alg]; ok {
|
||||||
case x509.MD2WithRSA:
|
return s
|
||||||
return "MD2"
|
|
||||||
case x509.MD5WithRSA:
|
|
||||||
return "MD5"
|
|
||||||
case x509.SHA1WithRSA:
|
|
||||||
return "SHA1"
|
|
||||||
case x509.SHA256WithRSA:
|
|
||||||
return "SHA256"
|
|
||||||
case x509.SHA384WithRSA:
|
|
||||||
return "SHA384"
|
|
||||||
case x509.SHA512WithRSA:
|
|
||||||
return "SHA512"
|
|
||||||
case x509.DSAWithSHA1:
|
|
||||||
return "SHA1"
|
|
||||||
case x509.DSAWithSHA256:
|
|
||||||
return "SHA256"
|
|
||||||
case x509.ECDSAWithSHA1:
|
|
||||||
return "SHA1"
|
|
||||||
case x509.ECDSAWithSHA256:
|
|
||||||
return "SHA256"
|
|
||||||
case x509.ECDSAWithSHA384:
|
|
||||||
return "SHA384"
|
|
||||||
case x509.ECDSAWithSHA512:
|
|
||||||
return "SHA512"
|
|
||||||
default:
|
|
||||||
return "Unknown Hash Algorithm"
|
|
||||||
}
|
}
|
||||||
|
return "Unknown Hash Algorithm"
|
||||||
}
|
}
|
||||||
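SignatureString and HashAlgoString are now table lookups over the maps above, keeping the old default strings for unknown algorithms. For example:

package main

import (
    "crypto/x509"
    "fmt"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    alg := x509.ECDSAWithSHA256
    fmt.Println(certlib.SignatureString(alg)) // "ECDSAWithSHA256"
    fmt.Println(certlib.HashAlgoString(alg))  // "SHA256"
}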
|
|
||||||
// StringTLSVersion returns underlying enum values from human names for TLS
|
// StringTLSVersion returns underlying enum values from human names for TLS
|
||||||
// versions, defaults to current golang default of TLS 1.0
|
// versions, defaults to current golang default of TLS 1.0.
|
||||||
func StringTLSVersion(version string) uint16 {
|
func StringTLSVersion(version string) uint16 {
|
||||||
switch version {
|
switch version {
|
||||||
|
case "1.3":
|
||||||
|
return tls.VersionTLS13
|
||||||
case "1.2":
|
case "1.2":
|
||||||
return tls.VersionTLS12
|
return tls.VersionTLS12
|
||||||
case "1.1":
|
case "1.1":
|
||||||
return tls.VersionTLS11
|
return tls.VersionTLS11
|
||||||
|
case "1.0":
|
||||||
|
return tls.VersionTLS10
|
||||||
default:
|
default:
|
||||||
|
// Default to Go's historical default of TLS 1.0 for unknown values
|
||||||
return tls.VersionTLS10
|
return tls.VersionTLS10
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
|
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM.
|
||||||
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
|
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
|
||||||
var buffer bytes.Buffer
|
var buffer bytes.Buffer
|
||||||
for _, cert := range certs {
|
for _, cert := range certs {
|
||||||
pem.Encode(&buffer, &pem.Block{
|
if err := pem.Encode(&buffer, &pem.Block{
|
||||||
Type: "CERTIFICATE",
|
Type: "CERTIFICATE",
|
||||||
Bytes: cert.Raw,
|
Bytes: cert.Raw,
|
||||||
})
|
}); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return buffer.Bytes()
|
return buffer.Bytes()
|
||||||
}
|
}
|
||||||
|
|
||||||
// EncodeCertificatePEM encodes a single x509 certificates to PEM
|
// EncodeCertificatePEM encodes a single x509 certificates to PEM.
|
||||||
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
|
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
|
||||||
return EncodeCertificatesPEM([]*x509.Certificate{cert})
|
return EncodeCertificatesPEM([]*x509.Certificate{cert})
|
||||||
}
|
}
|
||||||
@@ -269,38 +279,52 @@ func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
|
|||||||
certs = append(certs, cert...)
|
certs = append(certs, cert...)
|
||||||
}
|
}
|
||||||
if len(certsPEM) > 0 {
|
if len(certsPEM) > 0 {
|
||||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("trailing data at end of certificate"))
|
return nil, certerr.DecodeError(
|
||||||
|
certerr.ErrorSourceCertificate,
|
||||||
|
errors.New("trailing data at end of certificate"),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
return certs, nil
|
return certs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
|
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
|
||||||
// either PKCS #7, PKCS #12, or raw x509.
|
// either PKCS #7, PKCS #12, or raw x509.
|
||||||
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
|
func ParseCertificatesDER(certsDER []byte, password string) ([]*x509.Certificate, crypto.Signer, error) {
|
||||||
certsDER = bytes.TrimSpace(certsDER)
|
certsDER = bytes.TrimSpace(certsDER)
|
||||||
pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
|
|
||||||
if err != nil {
|
// First, try PKCS #7
|
||||||
var pkcs12data interface{}
|
if pkcs7data, err7 := pkcs7.ParsePKCS7(certsDER); err7 == nil {
|
||||||
certs = make([]*x509.Certificate, 1)
|
if pkcs7data.ContentInfo != "SignedData" {
|
||||||
pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
|
return nil, nil, certerr.DecodeError(
|
||||||
if err != nil {
|
certerr.ErrorSourceCertificate,
|
||||||
certs, err = x509.ParseCertificates(certsDER)
|
errors.New("can only extract certificates from signed data content info"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
certs := pkcs7data.Content.SignedData.Certificates
|
||||||
|
if certs == nil {
|
||||||
|
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
|
||||||
|
}
|
||||||
|
return certs, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next, try PKCS #12
|
||||||
|
if pkcs12data, cert, err12 := pkcs12.Decode(certsDER, password); err12 == nil {
|
||||||
|
signer, ok := pkcs12data.(crypto.Signer)
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, certerr.DecodeError(
|
||||||
|
certerr.ErrorSourcePrivateKey,
|
||||||
|
errors.New("PKCS12 data does not contain a private key"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return []*x509.Certificate{cert}, signer, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, attempt to parse raw X.509 certificates
|
||||||
|
certs, err := x509.ParseCertificates(certsDER)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
|
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
|
||||||
}
|
}
|
||||||
} else {
|
return certs, nil, nil
|
||||||
key = pkcs12data.(crypto.Signer)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if pkcs7data.ContentInfo != "SignedData" {
|
|
||||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("can only extract certificates from signed data content info"))
|
|
||||||
}
|
|
||||||
certs = pkcs7data.Content.SignedData.Certificates
|
|
||||||
}
|
|
||||||
if certs == nil {
|
|
||||||
return nil, key, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
|
|
||||||
}
|
|
||||||
return certs, key, nil
|
|
||||||
}
|
}
|
||||||
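ParseCertificatesDER now attempts PKCS #7, then PKCS #12, then raw X.509, returning as soon as one succeeds. A sketch of calling it on a PKCS #12 bundle; the file name and password are placeholders:

package main

import (
    "fmt"
    "log"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    raw, err := os.ReadFile("bundle.p12") // placeholder PKCS #12 file
    if err != nil {
        log.Fatal(err)
    }
    certs, key, err := certlib.ParseCertificatesDER(raw, "changeit") // placeholder password
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d certificate(s), private key present: %v\n", len(certs), key != nil)
}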
|
|
||||||
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and check if it is self-signed.
|
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and check if it is self-signed.
|
||||||
@@ -310,7 +334,8 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
|
err = cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature)
|
||||||
|
if err != nil {
|
||||||
return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
|
return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
|
||||||
}
|
}
|
||||||
return cert, nil
|
return cert, nil
|
||||||
@@ -320,17 +345,26 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
|||||||
// can handle PEM encoded PKCS #7 structures.
|
// can handle PEM encoded PKCS #7 structures.
|
||||||
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||||
certPEM = bytes.TrimSpace(certPEM)
|
certPEM = bytes.TrimSpace(certPEM)
|
||||||
cert, rest, err := ParseOneCertificateFromPEM(certPEM)
|
certs, rest, err := ParseOneCertificateFromPEM(certPEM)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
} else if cert == nil {
|
|
||||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
|
|
||||||
} else if len(rest) > 0 {
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PEM file should contain only one object"))
|
|
||||||
} else if len(cert) > 1 {
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
|
|
||||||
}
|
}
|
||||||
return cert[0], nil
|
if certs == nil {
|
||||||
|
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
|
||||||
|
}
|
||||||
|
if len(rest) > 0 {
|
||||||
|
return nil, certerr.ParsingError(
|
||||||
|
certerr.ErrorSourceCertificate,
|
||||||
|
errors.New("the PEM file should contain only one object"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if len(certs) > 1 {
|
||||||
|
return nil, certerr.ParsingError(
|
||||||
|
certerr.ErrorSourceCertificate,
|
||||||
|
errors.New("the PKCS7 object in the PEM file should contain only one certificate"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return certs[0], nil
|
||||||
}
|
}
|
||||||
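ParseCertificatePEM keeps its single-object contract but now reports each failure (parse error, empty input, trailing objects, multi-certificate PKCS #7) as a distinct certerr error. A usage sketch; the file name is a placeholder:

package main

import (
    "fmt"
    "log"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    pemBytes, err := os.ReadFile("server.pem") // placeholder single-certificate PEM
    if err != nil {
        log.Fatal(err)
    }
    cert, err := certlib.ParseCertificatePEM(pemBytes)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(cert.Subject)
}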
|
|
||||||
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
|
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
|
||||||
@@ -338,7 +372,6 @@ func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
|||||||
// multiple certificates, from the top of certsPEM, which itself may
|
// multiple certificates, from the top of certsPEM, which itself may
|
||||||
// contain multiple PEM encoded certificate objects.
|
// contain multiple PEM encoded certificate objects.
|
||||||
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
|
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
|
||||||
|
|
||||||
block, rest := pem.Decode(certsPEM)
|
block, rest := pem.Decode(certsPEM)
|
||||||
if block == nil {
|
if block == nil {
|
||||||
return nil, rest, nil
|
return nil, rest, nil
|
||||||
@@ -346,8 +379,8 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
|
|||||||
|
|
||||||
cert, err := x509.ParseCertificate(block.Bytes)
|
cert, err := x509.ParseCertificate(block.Bytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
|
pkcs7data, err2 := pkcs7.ParsePKCS7(block.Bytes)
|
||||||
if err != nil {
|
if err2 != nil {
|
||||||
return nil, rest, err
|
return nil, rest, err
|
||||||
}
|
}
|
||||||
if pkcs7data.ContentInfo != "SignedData" {
|
if pkcs7data.ContentInfo != "SignedData" {
|
||||||
@@ -366,7 +399,7 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
|
|||||||
// LoadPEMCertPool loads a pool of PEM certificates from file.
|
// LoadPEMCertPool loads a pool of PEM certificates from file.
|
||||||
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
||||||
if certsFile == "" {
|
if certsFile == "" {
|
||||||
return nil, nil
|
return nil, nil //nolint:nilnil // no CA file provided -> treat as no pool and no error
|
||||||
}
|
}
|
||||||
pemCerts, err := os.ReadFile(certsFile)
|
pemCerts, err := os.ReadFile(certsFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -379,12 +412,12 @@ func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
|||||||
// PEMToCertPool concerts PEM certificates to a CertPool.
|
// PEMToCertPool concerts PEM certificates to a CertPool.
|
||||||
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
||||||
if len(pemCerts) == 0 {
|
if len(pemCerts) == 0 {
|
||||||
return nil, nil
|
return nil, nil //nolint:nilnil // empty input means no pool needed
|
||||||
}
|
}
|
||||||
|
|
||||||
certPool := x509.NewCertPool()
|
certPool := x509.NewCertPool()
|
||||||
if !certPool.AppendCertsFromPEM(pemCerts) {
|
if !certPool.AppendCertsFromPEM(pemCerts) {
|
||||||
return nil, errors.New("failed to load cert pool")
|
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, errors.New("failed to load cert pool"))
|
||||||
}
|
}
|
||||||
|
|
||||||
return certPool, nil
|
return certPool, nil
|
||||||
@@ -393,14 +426,14 @@ func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
|||||||
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
|
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
|
||||||
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
|
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
|
||||||
// or elliptic private key.
|
// or elliptic private key.
|
||||||
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
|
func ParsePrivateKeyPEM(keyPEM []byte) (crypto.Signer, error) {
|
||||||
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
|
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
|
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
|
||||||
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
|
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
|
||||||
// or elliptic private key.
|
// or elliptic private key.
|
||||||
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
|
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (crypto.Signer, error) {
|
||||||
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
|
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -420,44 +453,49 @@ func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if keyDER != nil {
|
if keyDER == nil {
|
||||||
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
|
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
|
||||||
if strings.Contains(procType, "ENCRYPTED") {
|
}
|
||||||
|
if procType, ok := keyDER.Headers["Proc-Type"]; ok && strings.Contains(procType, "ENCRYPTED") {
|
||||||
if password != nil {
|
if password != nil {
|
||||||
|
// nolintlint requires rationale:
|
||||||
|
//nolint:staticcheck // legacy RFC1423 PEM encryption supported for backward compatibility when caller supplies a password
|
||||||
return x509.DecryptPEMBlock(keyDER, password)
|
return x509.DecryptPEMBlock(keyDER, password)
|
||||||
}
|
}
|
||||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
|
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return keyDER.Bytes, nil
|
return keyDER.Bytes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
|
|
||||||
}
|
|
||||||
|
|
||||||
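GetKeyDERFromPEM now fails fast when no PEM block decodes and only attempts legacy RFC 1423 decryption when the caller supplies a password. A hedged sketch of the password-protected path; the key path and passphrase are placeholders:

package main

import (
    "fmt"
    "log"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    keyPEM, err := os.ReadFile("key.pem") // placeholder, possibly encrypted key
    if err != nil {
        log.Fatal(err)
    }
    signer, err := certlib.ParsePrivateKeyPEMWithPassword(keyPEM, []byte("passphrase"))
    if err != nil {
        log.Fatal(err) // ErrEncryptedPrivateKey is returned when the key is encrypted and no password is given
    }
    fmt.Printf("loaded key, public part %T\n", signer.Public())
}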
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
|
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
|
||||||
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
|
func ParseCSR(in []byte) (*x509.CertificateRequest, []byte, error) {
|
||||||
in = bytes.TrimSpace(in)
|
in = bytes.TrimSpace(in)
|
||||||
p, rest := pem.Decode(in)
|
p, rest := pem.Decode(in)
|
||||||
if p != nil {
|
if p == nil {
|
||||||
|
csr, err := x509.ParseCertificateRequest(in)
|
||||||
|
if err != nil {
|
||||||
|
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
|
||||||
|
}
|
||||||
|
if sigErr := csr.CheckSignature(); sigErr != nil {
|
||||||
|
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
|
||||||
|
}
|
||||||
|
return csr, rest, nil
|
||||||
|
}
|
||||||
|
|
||||||
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
|
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
|
||||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"))
|
return nil, rest, certerr.ParsingError(
|
||||||
}
|
certerr.ErrorSourceCSR,
|
||||||
|
certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"),
|
||||||
csr, err = x509.ParseCertificateRequest(p.Bytes)
|
)
|
||||||
} else {
|
|
||||||
csr, err = x509.ParseCertificateRequest(in)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
csr, err := x509.ParseCertificateRequest(p.Bytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, rest, err
|
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
|
||||||
}
|
}
|
||||||
|
if sigErr := csr.CheckSignature(); sigErr != nil {
|
||||||
err = csr.CheckSignature()
|
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
|
||||||
if err != nil {
|
|
||||||
return nil, rest, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return csr, rest, nil
|
return csr, rest, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -465,14 +503,14 @@ func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error)
|
|||||||
// It does not check the signature. This is useful for dumping data from a CSR
|
// It does not check the signature. This is useful for dumping data from a CSR
|
||||||
// locally.
|
// locally.
|
||||||
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
||||||
block, _ := pem.Decode([]byte(csrPEM))
|
block, _ := pem.Decode(csrPEM)
|
||||||
if block == nil {
|
if block == nil {
|
||||||
return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
|
return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
|
||||||
}
|
}
|
||||||
csrObject, err := x509.ParseCertificateRequest(block.Bytes)
|
csrObject, err := x509.ParseCertificateRequest(block.Bytes)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, certerr.ParsingError(certerr.ErrorSourceCSR, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return csrObject, nil
|
return csrObject, nil
|
||||||
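ParseCSR accepts PEM or raw DER, verifies the request's own signature, and now wraps both parse and verify failures in certerr errors; ParseCSRPEM skips the signature check. A sketch; the file name is a placeholder:

package main

import (
    "fmt"
    "log"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    raw, err := os.ReadFile("request.csr") // placeholder PEM or DER CSR
    if err != nil {
        log.Fatal(err)
    }
    csr, _, err := certlib.ParseCSR(raw) // also checks the CSR signature
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("CSR for:", csr.Subject.CommonName)
}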
@@ -480,15 +518,20 @@ func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
|||||||
|
|
||||||
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
|
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
|
||||||
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
||||||
|
const (
|
||||||
|
rsaBits2048 = 2048
|
||||||
|
rsaBits3072 = 3072
|
||||||
|
rsaBits4096 = 4096
|
||||||
|
)
|
||||||
switch pub := priv.Public().(type) {
|
switch pub := priv.Public().(type) {
|
||||||
case *rsa.PublicKey:
|
case *rsa.PublicKey:
|
||||||
bitLength := pub.N.BitLen()
|
bitLength := pub.N.BitLen()
|
||||||
switch {
|
switch {
|
||||||
case bitLength >= 4096:
|
case bitLength >= rsaBits4096:
|
||||||
return x509.SHA512WithRSA
|
return x509.SHA512WithRSA
|
||||||
case bitLength >= 3072:
|
case bitLength >= rsaBits3072:
|
||||||
return x509.SHA384WithRSA
|
return x509.SHA384WithRSA
|
||||||
case bitLength >= 2048:
|
case bitLength >= rsaBits2048:
|
||||||
return x509.SHA256WithRSA
|
return x509.SHA256WithRSA
|
||||||
default:
|
default:
|
||||||
return x509.SHA1WithRSA
|
return x509.SHA1WithRSA
|
||||||
@@ -509,7 +552,7 @@ func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadClientCertificate load key/certificate from pem files
|
// LoadClientCertificate load key/certificate from pem files.
|
||||||
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
|
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
|
||||||
if certFile != "" && keyFile != "" {
|
if certFile != "" && keyFile != "" {
|
||||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||||
@@ -518,10 +561,10 @@ func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, e
|
|||||||
}
|
}
|
||||||
return &cert, nil
|
return &cert, nil
|
||||||
}
|
}
|
||||||
return nil, nil
|
return nil, nil //nolint:nilnil // absence of client cert is not an error
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateTLSConfig creates a tls.Config object from certs and roots
|
// CreateTLSConfig creates a tls.Config object from certs and roots.
|
||||||
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
|
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
|
||||||
var certs []tls.Certificate
|
var certs []tls.Certificate
|
||||||
if cert != nil {
|
if cert != nil {
|
||||||
@@ -530,6 +573,7 @@ func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Confi
|
|||||||
return &tls.Config{
|
return &tls.Config{
|
||||||
Certificates: certs,
|
Certificates: certs,
|
||||||
RootCAs: remoteCAs,
|
RootCAs: remoteCAs,
|
||||||
|
MinVersion: tls.VersionTLS12, // secure default
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -554,18 +598,24 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if len(rest) != 0 {
|
if len(rest) != 0 {
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
return nil, certerr.ParsingError(
|
||||||
|
certerr.ErrorSourceSCTList,
|
||||||
|
errors.New("serialized SCT list contained trailing garbage"),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
|
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
|
||||||
for i, serializedSCT := range sctList.SCTList {
|
for i, serializedSCT := range sctList.SCTList {
|
||||||
var sct ct.SignedCertificateTimestamp
|
var sct ct.SignedCertificateTimestamp
|
||||||
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
|
rest2, err2 := cttls.Unmarshal(serializedSCT.Val, &sct)
|
||||||
if err != nil {
|
if err2 != nil {
|
||||||
return nil, err
|
return nil, err2
|
||||||
}
|
}
|
||||||
if len(rest) != 0 {
|
if len(rest2) != 0 {
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
return nil, certerr.ParsingError(
|
||||||
|
certerr.ErrorSourceSCTList,
|
||||||
|
errors.New("serialized SCT list contained trailing garbage"),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
list[i] = sct
|
list[i] = sct
|
||||||
}
|
}
|
||||||
@@ -577,12 +627,12 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
|
|||||||
// unmarshalled.
|
// unmarshalled.
|
||||||
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
|
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
|
||||||
// This loop finds the SCTListExtension in the OCSP response.
|
// This loop finds the SCTListExtension in the OCSP response.
|
||||||
var SCTListExtension, ext pkix.Extension
|
var sctListExtension, ext pkix.Extension
|
||||||
for _, ext = range response.Extensions {
|
for _, ext = range response.Extensions {
|
||||||
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
|
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
|
||||||
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
|
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
|
||||||
if ext.Id.Equal(sctExtOid) {
|
if ext.Id.Equal(sctExtOid) {
|
||||||
SCTListExtension = ext
|
sctListExtension = ext
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -590,10 +640,10 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
|
|||||||
// This code block extracts the sctList from the SCT extension.
|
// This code block extracts the sctList from the SCT extension.
|
||||||
var sctList []ct.SignedCertificateTimestamp
|
var sctList []ct.SignedCertificateTimestamp
|
||||||
var err error
|
var err error
|
||||||
if numBytes := len(SCTListExtension.Value); numBytes != 0 {
|
if numBytes := len(sctListExtension.Value); numBytes != 0 {
|
||||||
var serializedSCTList []byte
|
var serializedSCTList []byte
|
||||||
rest := make([]byte, numBytes)
|
rest := make([]byte, numBytes)
|
||||||
copy(rest, SCTListExtension.Value)
|
copy(rest, sctListExtension.Value)
|
||||||
for len(rest) != 0 {
|
for len(rest) != 0 {
|
||||||
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
|
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -611,20 +661,16 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
|
|||||||
// the subsequent file. If no prefix is provided, valFile is assumed to be a
|
// the subsequent file. If no prefix is provided, valFile is assumed to be a
|
||||||
// file path.
|
// file path.
|
||||||
func ReadBytes(valFile string) ([]byte, error) {
|
func ReadBytes(valFile string) ([]byte, error) {
|
||||||
switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) {
|
prefix, rest, found := strings.Cut(valFile, ":")
|
||||||
case 1:
|
if !found {
|
||||||
return os.ReadFile(valFile)
|
return os.ReadFile(valFile)
|
||||||
case 2:
|
}
|
||||||
switch splitVal[0] {
|
switch prefix {
|
||||||
case "env":
|
case "env":
|
||||||
return []byte(os.Getenv(splitVal[1])), nil
|
return []byte(os.Getenv(rest)), nil
|
||||||
case "file":
|
case "file":
|
||||||
return os.ReadFile(splitVal[1])
|
return os.ReadFile(rest)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown prefix: %s", splitVal[0])
|
return nil, fmt.Errorf("unknown prefix: %s", prefix)
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("multiple prefixes: %s",
|
|
||||||
strings.Join(splitVal[:len(splitVal)-1], ", "))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
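ReadBytes now splits the value on the first ':' with strings.Cut: no prefix means a file path, "env:" reads an environment variable, and "file:" reads the named file. A short sketch of the three forms; the names are placeholders:

package main

import (
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    for _, spec := range []string{"secret.txt", "env:API_TOKEN", "file:secret.txt"} {
        val, err := certlib.ReadBytes(spec)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s -> %d bytes\n", spec, len(val))
    }
}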
|
|||||||
84 certlib/hosts/hosts.go Normal file
@@ -0,0 +1,84 @@
package hosts

import (
    "errors"
    "fmt"
    "net"
    "net/url"
    "strconv"
    "strings"
)

const defaultHTTPSPort = 443

type Target struct {
    Host string
    Port int
}

func (t *Target) String() string {
    return fmt.Sprintf("%s:%d", t.Host, t.Port)
}

func parseURL(host string) (string, int, error) {
    url, err := url.Parse(host)
    if err != nil {
        return "", 0, fmt.Errorf("certlib/hosts: invalid host: %s", host)
    }

    if strings.ToLower(url.Scheme) != "https" {
        return "", 0, errors.New("certlib/hosts: only https scheme supported")
    }

    if url.Port() == "" {
        return url.Hostname(), defaultHTTPSPort, nil
    }

    portInt, err2 := strconv.ParseInt(url.Port(), 10, 16)
    if err2 != nil {
        return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", url.Port())
    }

    return url.Hostname(), int(portInt), nil
}

func parseHostPort(host string) (string, int, error) {
    host, sport, err := net.SplitHostPort(host)
    if err == nil {
        portInt, err2 := strconv.ParseInt(sport, 10, 16)
        if err2 != nil {
            return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", sport)
        }

        return host, int(portInt), nil
    }

    return host, defaultHTTPSPort, nil
}

func ParseHost(host string) (*Target, error) {
    host, port, err := parseURL(host)
    if err == nil {
        return &Target{Host: host, Port: port}, nil
    }

    host, port, err = parseHostPort(host)
    if err == nil {
        return &Target{Host: host, Port: port}, nil
    }

    return nil, fmt.Errorf("certlib/hosts: invalid host: %s", host)
}

func ParseHosts(hosts ...string) ([]*Target, error) {
    targets := make([]*Target, 0, len(hosts))
    for _, host := range hosts {
        target, err := ParseHost(host)
        if err != nil {
            return nil, err
        }
        targets = append(targets, target)
    }

    return targets, nil
}
||||||
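The new certlib/hosts package is meant to normalize scan targets given either as an https:// URL or as a host:port pair into a Target, defaulting to port 443. A usage sketch; the import path is inferred from the file location and the hostnames are placeholders:

package main

import (
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib/hosts"
)

func main() {
    t, err := hosts.ParseHost("https://example.org:8443")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(t.String()) // example.org:8443

    targets, err := hosts.ParseHosts("https://example.org", "example.net:8443")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(targets), "targets parsed")
}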
@@ -93,7 +93,7 @@ type signedData struct {
|
|||||||
Version int
|
Version int
|
||||||
DigestAlgorithms asn1.RawValue
|
DigestAlgorithms asn1.RawValue
|
||||||
ContentInfo asn1.RawValue
|
ContentInfo asn1.RawValue
|
||||||
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
|
Certificates asn1.RawValue `asn1:"optional"`
|
||||||
Crls asn1.RawValue `asn1:"optional"`
|
Crls asn1.RawValue `asn1:"optional"`
|
||||||
SignerInfos asn1.RawValue
|
SignerInfos asn1.RawValue
|
||||||
}
|
}
|
||||||
@@ -158,63 +158,95 @@ type EncryptedContentInfo struct {
|
|||||||
EncryptedContent []byte `asn1:"tag:0,optional"`
|
EncryptedContent []byte `asn1:"tag:0,optional"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func unmarshalInit(raw []byte) (initPKCS7, error) {
|
||||||
|
var init initPKCS7
|
||||||
|
if _, err := asn1.Unmarshal(raw, &init); err != nil {
|
||||||
|
return initPKCS7{}, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
}
|
||||||
|
return init, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateData(msg *PKCS7, content asn1.RawValue) error {
|
||||||
|
msg.ContentInfo = "Data"
|
||||||
|
_, err := asn1.Unmarshal(content.Bytes, &msg.Content.Data)
|
||||||
|
if err != nil {
|
||||||
|
return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateSignedData(msg *PKCS7, contentBytes []byte) error {
|
||||||
|
msg.ContentInfo = "SignedData"
|
||||||
|
var sd signedData
|
||||||
|
if _, err := asn1.Unmarshal(contentBytes, &sd); err != nil {
|
||||||
|
return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
}
|
||||||
|
if len(sd.Certificates.Bytes) != 0 {
|
||||||
|
certs, err := x509.ParseCertificates(sd.Certificates.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
}
|
||||||
|
msg.Content.SignedData.Certificates = certs
|
||||||
|
}
|
||||||
|
if len(sd.Crls.Bytes) != 0 {
|
||||||
|
crl, err := x509.ParseRevocationList(sd.Crls.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
}
|
||||||
|
msg.Content.SignedData.Crl = crl
|
||||||
|
}
|
||||||
|
msg.Content.SignedData.Version = sd.Version
|
||||||
|
msg.Content.SignedData.Raw = contentBytes
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateEncryptedData(msg *PKCS7, contentBytes []byte) error {
|
||||||
|
msg.ContentInfo = "EncryptedData"
|
||||||
|
var ed EncryptedData
|
||||||
|
if _, err := asn1.Unmarshal(contentBytes, &ed); err != nil {
|
||||||
|
return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||||
|
}
|
||||||
|
if ed.Version != 0 {
|
||||||
|
return certerr.ParsingError(
|
||||||
|
certerr.ErrorSourceCertificate,
|
||||||
|
errors.New("only PKCS #7 encryptedData version 0 is supported"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
msg.Content.EncryptedData = ed
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// ParsePKCS7 attempts to parse the DER encoded bytes of a
|
// ParsePKCS7 attempts to parse the DER encoded bytes of a
|
||||||
// PKCS7 structure.
|
// PKCS7 structure.
|
||||||
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
|
func ParsePKCS7(raw []byte) (*PKCS7, error) {
|
||||||
|
pkcs7, err := unmarshalInit(raw)
|
||||||
var pkcs7 initPKCS7
|
|
||||||
_, err = asn1.Unmarshal(raw, &pkcs7)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
msg = new(PKCS7)
|
msg := new(PKCS7)
|
||||||
msg.Raw = pkcs7.Raw
|
msg.Raw = pkcs7.Raw
|
||||||
msg.ContentInfo = pkcs7.ContentType.String()
|
msg.ContentInfo = pkcs7.ContentType.String()
|
||||||
switch {
|
|
||||||
case msg.ContentInfo == ObjIDData:
|
|
||||||
msg.ContentInfo = "Data"
|
|
||||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
|
|
||||||
if err != nil {
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
|
||||||
}
|
|
||||||
case msg.ContentInfo == ObjIDSignedData:
|
|
||||||
msg.ContentInfo = "SignedData"
|
|
||||||
var signedData signedData
|
|
||||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
|
|
||||||
if err != nil {
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
|
||||||
}
|
|
||||||
if len(signedData.Certificates.Bytes) != 0 {
|
|
||||||
msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(signedData.Crls.Bytes) != 0 {
|
|
||||||
msg.Content.SignedData.Crl, err = x509.ParseRevocationList(signedData.Crls.Bytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
msg.Content.SignedData.Version = signedData.Version
|
|
||||||
msg.Content.SignedData.Raw = pkcs7.Content.Bytes
|
|
||||||
case msg.ContentInfo == ObjIDEncryptedData:
|
|
||||||
msg.ContentInfo = "EncryptedData"
|
|
||||||
var encryptedData EncryptedData
|
|
||||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
|
|
||||||
if err != nil {
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
|
||||||
}
|
|
||||||
if encryptedData.Version != 0 {
|
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 encryptedData version 0 is supported"))
|
|
||||||
}
|
|
||||||
msg.Content.EncryptedData = encryptedData
|
|
||||||
|
|
||||||
|
switch msg.ContentInfo {
|
||||||
|
case ObjIDData:
|
||||||
|
if e := populateData(msg, pkcs7.Content); e != nil {
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
case ObjIDSignedData:
|
||||||
|
if e := populateSignedData(msg, pkcs7.Content.Bytes); e != nil {
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
case ObjIDEncryptedData:
|
||||||
|
if e := populateEncryptedData(msg, pkcs7.Content.Bytes); e != nil {
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"))
|
return nil, certerr.ParsingError(
|
||||||
|
certerr.ErrorSourceCertificate,
|
||||||
|
errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
return msg, nil
|
return msg, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
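ParsePKCS7 is now a thin dispatcher over unmarshalInit and the populateData/populateSignedData/populateEncryptedData helpers. A hedged sketch of pulling certificates out of a signed-data blob; the file name is a placeholder:

package main

import (
    "fmt"
    "log"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
)

func main() {
    raw, err := os.ReadFile("bundle.p7b") // placeholder DER-encoded PKCS #7 file
    if err != nil {
        log.Fatal(err)
    }
    msg, err := pkcs7.ParsePKCS7(raw)
    if err != nil {
        log.Fatal(err)
    }
    if msg.ContentInfo == "SignedData" {
        fmt.Printf("%d embedded certificate(s)\n", len(msg.Content.SignedData.Certificates))
    }
}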
|
|||||||
@@ -5,6 +5,7 @@ package revoke
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"crypto"
|
"crypto"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
@@ -89,35 +90,35 @@ func ldapURL(url string) bool {
|
|||||||
// - false, false: an error was encountered while checking revocations.
|
// - false, false: an error was encountered while checking revocations.
|
||||||
// - false, true: the certificate was checked successfully, and it is not revoked.
|
// - false, true: the certificate was checked successfully, and it is not revoked.
|
||||||
// - true, true: the certificate was checked successfully, and it is revoked.
|
// - true, true: the certificate was checked successfully, and it is revoked.
|
||||||
// - true, false: failure to check revocation status causes verification to fail
|
// - true, false: failure to check revocation status causes verification to fail.
|
||||||
func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
|
func revCheck(cert *x509.Certificate) (bool, bool, error) {
|
||||||
for _, url := range cert.CRLDistributionPoints {
|
for _, url := range cert.CRLDistributionPoints {
|
||||||
if ldapURL(url) {
|
if ldapURL(url) {
|
||||||
log.Infof("skipping LDAP CRL: %s", url)
|
log.Infof("skipping LDAP CRL: %s", url)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if revoked, ok, err := certIsRevokedCRL(cert, url); !ok {
|
if rvk, ok2, err2 := certIsRevokedCRL(cert, url); !ok2 {
|
||||||
log.Warning("error checking revocation via CRL")
|
log.Warning("error checking revocation via CRL")
|
||||||
if HardFail {
|
if HardFail {
|
||||||
return true, false, err
|
return true, false, err2
|
||||||
}
|
}
|
||||||
return false, false, err
|
return false, false, err2
|
||||||
} else if revoked {
|
} else if rvk {
|
||||||
log.Info("certificate is revoked via CRL")
|
log.Info("certificate is revoked via CRL")
|
||||||
return true, true, err
|
return true, true, err2
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if revoked, ok, err := certIsRevokedOCSP(cert, HardFail); !ok {
|
if rvk, ok2, err2 := certIsRevokedOCSP(cert, HardFail); !ok2 {
|
||||||
log.Warning("error checking revocation via OCSP")
|
log.Warning("error checking revocation via OCSP")
|
||||||
if HardFail {
|
if HardFail {
|
||||||
return true, false, err
|
return true, false, err2
|
||||||
}
|
}
|
||||||
return false, false, err
|
return false, false, err2
|
||||||
} else if revoked {
|
} else if rvk {
|
||||||
log.Info("certificate is revoked via OCSP")
|
log.Info("certificate is revoked via OCSP")
|
||||||
return true, true, err
|
return true, true, err2
|
||||||
}
|
}
|
||||||
|
|
||||||
return false, true, nil
|
return false, true, nil
|
||||||
@@ -125,13 +126,17 @@ func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
|
|||||||
|
|
||||||
// fetchCRL fetches and parses a CRL.
|
// fetchCRL fetches and parses a CRL.
|
||||||
func fetchCRL(url string) (*x509.RevocationList, error) {
|
func fetchCRL(url string) (*x509.RevocationList, error) {
|
||||||
resp, err := HTTPClient.Get(url)
|
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := HTTPClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
if resp.StatusCode >= 300 {
|
if resp.StatusCode >= http.StatusMultipleChoices {
|
||||||
return nil, errors.New("failed to retrieve CRL")
|
return nil, errors.New("failed to retrieve CRL")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -154,12 +159,11 @@ func getIssuer(cert *x509.Certificate) *x509.Certificate {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return issuer
|
return issuer
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// check a cert against a specific CRL. Returns the same bool pair
|
// check a cert against a specific CRL. Returns the same bool pair
|
||||||
// as revCheck, plus an error if one occurred.
|
// as revCheck, plus an error if one occurred.
|
||||||
func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err error) {
|
func certIsRevokedCRL(cert *x509.Certificate, url string) (bool, bool, error) {
|
||||||
crlLock.Lock()
|
crlLock.Lock()
|
||||||
crl, ok := CRLSet[url]
|
crl, ok := CRLSet[url]
|
||||||
if ok && crl == nil {
|
if ok && crl == nil {
|
||||||
@@ -187,10 +191,9 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
|
|||||||
|
|
||||||
// check CRL signature
|
// check CRL signature
|
||||||
if issuer != nil {
|
if issuer != nil {
|
||||||
err = crl.CheckSignatureFrom(issuer)
|
if sigErr := crl.CheckSignatureFrom(issuer); sigErr != nil {
|
||||||
if err != nil {
|
log.Warningf("failed to verify CRL: %v", sigErr)
|
||||||
log.Warningf("failed to verify CRL: %v", err)
|
return false, false, sigErr
|
||||||
return false, false, err
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -199,40 +202,44 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
|
|||||||
crlLock.Unlock()
|
crlLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, revoked := range crl.RevokedCertificates {
|
for _, entry := range crl.RevokedCertificateEntries {
|
||||||
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
|
if cert.SerialNumber.Cmp(entry.SerialNumber) == 0 {
|
||||||
log.Info("Serial number match: intermediate is revoked.")
|
log.Info("Serial number match: intermediate is revoked.")
|
||||||
return true, true, err
|
return true, true, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return false, true, err
|
return false, true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// VerifyCertificate ensures that the certificate passed in hasn't
|
// VerifyCertificate ensures that the certificate passed in hasn't
|
||||||
// expired and checks the CRL for the server.
|
// expired and checks the CRL for the server.
|
||||||
func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
|
func VerifyCertificate(cert *x509.Certificate) (bool, bool) {
|
||||||
revoked, ok, _ = VerifyCertificateError(cert)
|
revoked, ok, _ := VerifyCertificateError(cert)
|
||||||
return revoked, ok
|
return revoked, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
// VerifyCertificateError ensures that the certificate passed in hasn't
|
// VerifyCertificateError ensures that the certificate passed in hasn't
|
||||||
// expired and checks the CRL for the server.
|
// expired and checks the CRL for the server.
|
||||||
func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error) {
|
func VerifyCertificateError(cert *x509.Certificate) (bool, bool, error) {
|
||||||
if !time.Now().Before(cert.NotAfter) {
|
if !time.Now().Before(cert.NotAfter) {
|
||||||
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
|
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
|
||||||
log.Info(msg)
|
log.Info(msg)
|
||||||
return true, true, fmt.Errorf(msg)
|
return true, true, errors.New(msg)
|
||||||
} else if !time.Now().After(cert.NotBefore) {
|
} else if !time.Now().After(cert.NotBefore) {
|
||||||
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
|
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
|
||||||
log.Info(msg)
|
log.Info(msg)
|
||||||
return true, true, fmt.Errorf(msg)
|
return true, true, errors.New(msg)
|
||||||
}
|
}
|
||||||
return revCheck(cert)
|
return revCheck(cert)
|
||||||
}
|
}
|
||||||
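revCheck walks the CRL distribution points and then OCSP, with the package-level HardFail controlling whether an unreachable responder fails verification. A minimal sketch of the public entry point; the certificate path is a placeholder and the revoke import path is assumed from the repository layout:

package main

import (
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
    "git.wntrmute.dev/kyle/goutils/certlib/revoke"
)

func main() {
    cert, err := certlib.LoadCertificate("server.pem") // placeholder leaf certificate
    if err != nil {
        log.Fatal(err)
    }
    revoked, checked, err := revoke.VerifyCertificateError(cert)
    if err != nil {
        log.Println("revocation check problem:", err)
    }
    fmt.Printf("revoked=%v checked=%v\n", revoked, checked)
}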
|
|
||||||
func fetchRemote(url string) (*x509.Certificate, error) {
|
func fetchRemote(url string) (*x509.Certificate, error) {
|
||||||
resp, err := HTTPClient.Get(url)
|
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := HTTPClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -255,8 +262,12 @@ var ocspOpts = ocsp.RequestOptions{
|
|||||||
Hash: crypto.SHA1,
|
Hash: crypto.SHA1,
|
||||||
}
|
}
|
||||||
|
|
||||||
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e error) {
|
const ocspGetURLMaxLen = 256
|
||||||
var err error
|
|
||||||
|
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (bool, bool, error) {
|
||||||
|
var revoked bool
|
||||||
|
var ok bool
|
||||||
|
var lastErr error
|
||||||
|
|
||||||
ocspURLs := leaf.OCSPServer
|
ocspURLs := leaf.OCSPServer
|
||||||
if len(ocspURLs) == 0 {
|
if len(ocspURLs) == 0 {
|
||||||
@@ -272,15 +283,16 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
|||||||
|
|
||||||
ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
|
ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return revoked, ok, err
|
return false, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, server := range ocspURLs {
|
for _, server := range ocspURLs {
|
||||||
resp, err := sendOCSPRequest(server, ocspRequest, leaf, issuer)
|
resp, e := sendOCSPRequest(server, ocspRequest, leaf, issuer)
|
||||||
if err != nil {
|
if e != nil {
|
||||||
if strict {
|
if strict {
|
||||||
return revoked, ok, err
|
return false, false, e
|
||||||
}
|
}
|
||||||
|
lastErr = e
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -292,9 +304,9 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
|||||||
revoked = true
|
revoked = true
|
||||||
}
|
}
|
||||||
|
|
||||||
return revoked, ok, err
|
return revoked, ok, nil
|
||||||
}
|
}
|
||||||
return revoked, ok, err
|
return revoked, ok, lastErr
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendOCSPRequest attempts to request an OCSP response from the
|
// sendOCSPRequest attempts to request an OCSP response from the
|
||||||
@@ -303,12 +315,21 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
|||||||
func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
|
func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
var err error
|
var err error
|
||||||
if len(req) > 256 {
|
if len(req) > ocspGetURLMaxLen {
|
||||||
buf := bytes.NewBuffer(req)
|
buf := bytes.NewBuffer(req)
|
||||||
resp, err = HTTPClient.Post(server, "application/ocsp-request", buf)
|
httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodPost, server, buf)
|
||||||
|
if e != nil {
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
httpReq.Header.Set("Content-Type", "application/ocsp-request")
|
||||||
|
resp, err = HTTPClient.Do(httpReq)
|
||||||
} else {
|
} else {
|
||||||
reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
|
reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
|
||||||
resp, err = HTTPClient.Get(reqURL)
|
httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodGet, reqURL, nil)
|
||||||
|
if e != nil {
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
resp, err = HTTPClient.Do(httpReq)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -343,21 +364,21 @@ func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate)
|
|||||||
|
|
||||||
var crlRead = io.ReadAll
|
var crlRead = io.ReadAll
|
||||||
|
|
||||||
// SetCRLFetcher sets the function to use to read from the http response body
|
// SetCRLFetcher sets the function to use to read from the http response body.
|
||||||
func SetCRLFetcher(fn func(io.Reader) ([]byte, error)) {
|
func SetCRLFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||||
crlRead = fn
|
crlRead = fn
|
||||||
}
|
}
|
||||||
|
|
||||||
var remoteRead = io.ReadAll
|
var remoteRead = io.ReadAll
|
||||||
|
|
||||||
// SetRemoteFetcher sets the function to use to read from the http response body
|
// SetRemoteFetcher sets the function to use to read from the http response body.
|
||||||
func SetRemoteFetcher(fn func(io.Reader) ([]byte, error)) {
|
func SetRemoteFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||||
remoteRead = fn
|
remoteRead = fn
|
||||||
}
|
}
|
||||||
|
|
||||||
var ocspRead = io.ReadAll
|
var ocspRead = io.ReadAll
|
||||||
|
|
||||||
// SetOCSPFetcher sets the function to use to read from the http response body
|
// SetOCSPFetcher sets the function to use to read from the http response body.
|
||||||
func SetOCSPFetcher(fn func(io.Reader) ([]byte, error)) {
|
func SetOCSPFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||||
ocspRead = fn
|
ocspRead = fn
|
||||||
}
|
}
|
||||||
|
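For reference, a minimal sketch of how a caller might drive the reworked API. The revoke import path is an assumption (only VerifyCertificate and VerifyCertificateError appear in the hunks above):

```go
// Hypothetical caller; the import path below is assumed, not taken from this diff.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/revoke" // assumed package path
)

func main() {
	data, err := os.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}

	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// VerifyCertificateError surfaces the underlying CRL/OCSP error;
	// VerifyCertificate drops it and returns only (revoked, ok).
	revoked, ok, verr := revoke.VerifyCertificateError(cert)
	fmt.Printf("revoked=%v checked=%v err=%v\n", revoked, ok, verr)
}
```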

@@ -1,3 +1,4 @@
+//nolint:testpackage // keep tests in the same package for internal symbol access
 package revoke
 
 import (
@@ -50,7 +51,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // to indicate that this is the case.
 
 // 2014/05/22 14:18:17 Certificate expired 2014-04-04 14:14:20 +0000 UTC
-// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt
+// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt.
 var expiredCert = mustParse(`-----BEGIN CERTIFICATE-----
 MIIEXTCCA8agAwIBAgIEBycURTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV
 UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
@@ -80,7 +81,7 @@ sESPRwHkcMUNdAp37FLweUw=
 
 // 2014/05/22 14:18:31 Serial number match: intermediate is revoked.
 // 2014/05/22 14:18:31 certificate is revoked via CRL
-// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt
+// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt.
 var revokedCert = mustParse(`-----BEGIN CERTIFICATE-----
 MIIEEzCCAvugAwIBAgILBAAAAAABGMGjftYwDQYJKoZIhvcNAQEFBQAwcTEoMCYG
 A1UEAxMfR2xvYmFsU2lnbiBSb290U2lnbiBQYXJ0bmVycyBDQTEdMBsGA1UECxMU
@@ -106,7 +107,7 @@ Kz5vh+5tmytUPKA8hUgmLWe94lMb7Uqq2wgZKsqun5DAWleKu81w7wEcOrjiiB+x
 jeBHq7OnpWm+ccTOPCE6H4ZN4wWVS7biEBUdop/8HgXBPQHWAdjL
 -----END CERTIFICATE-----`)
 
-// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url
+// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url.
 var goodComodoCA = (`-----BEGIN CERTIFICATE-----
 MIIGCDCCA/CgAwIBAgIQKy5u6tl1NmwUim7bo3yMBzANBgkqhkiG9w0BAQwFADCB
 hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
@@ -153,7 +154,7 @@ func mustParse(pemData string) *x509.Certificate {
 		panic("Invalid PEM type.")
 	}
 
-	cert, err := x509.ParseCertificate([]byte(block.Bytes))
+	cert, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
 		panic(err.Error())
 	}
@@ -182,7 +183,6 @@ func TestGood(t *testing.T) {
 	} else if revoked {
 		t.Fatalf("good certificate should not have been marked as revoked")
 	}
-
 }
 
 func TestLdap(t *testing.T) {
@@ -230,7 +230,6 @@ func TestBadCRLSet(t *testing.T) {
 		t.Fatalf("key emptystring should be deleted from CRLSet")
 	}
 	delete(CRLSet, "")
-
 }
 
 func TestCachedCRLSet(t *testing.T) {
@@ -241,13 +240,11 @@ func TestCachedCRLSet(t *testing.T) {
 }
 
 func TestRemoteFetchError(t *testing.T) {
-
 	badurl := ":"
 
 	if _, err := fetchRemote(badurl); err == nil {
 		t.Fatalf("fetching bad url should result in non-nil error")
 	}
-
 }
 
 func TestNoOCSPServers(t *testing.T) {

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"net"
@@ -28,10 +29,16 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
 
 	if verbose {
 		fmt.Printf("connecting to %s/%s... ", addr, proto)
-		os.Stdout.Sync()
+		if err = os.Stdout.Sync(); err != nil {
+			return err
+		}
 	}
 
-	conn, err := net.DialTimeout(proto, addr, timeout)
+	dialer := &net.Dialer{
+		Timeout: timeout,
+	}
+
+	conn, err := dialer.DialContext(context.Background(), proto, addr)
 	if err != nil {
 		if verbose {
 			fmt.Println("failed.")
@@ -42,8 +49,8 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
 	if verbose {
 		fmt.Println("OK")
 	}
-	conn.Close()
-	return nil
+
+	return conn.Close()
 }
 
 func main() {
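The hunk above swaps net.DialTimeout for a net.Dialer driven through DialContext. A small sketch (standard library only) of the same pattern with an explicitly cancellable context, which is the practical benefit of the change:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// dial connects with both a dialer timeout and a cancellable context,
// mirroring the dialer.DialContext call introduced above.
func dial(addr string, timeout time.Duration) error {
	dialer := &net.Dialer{Timeout: timeout}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	conn, err := dialer.DialContext(ctx, "tcp", addr)
	if err != nil {
		return err
	}

	// Returning Close's error mirrors the `return conn.Close()` change above.
	return conn.Close()
}

func main() {
	fmt.Println(dial("example.com:443", 5*time.Second))
}
```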

cmd/ca-signed/README.txt (new file, 40 lines)
@@ -0,0 +1,40 @@
ca-signed: verify certificates against a CA
-------------------------------------------

Description
ca-signed verifies whether one or more certificates are signed by a given
Certificate Authority (CA). It prints a concise status per input certificate
along with the certificate’s expiration date when validation succeeds.

Usage
ca-signed CA.pem cert1.pem [cert2.pem ...]

- CA.pem: A file containing one or more CA certificates in PEM, DER, or PKCS#7/PKCS#12 formats.
- certN.pem: A file containing the end-entity (leaf) certificate to verify. If the file contains a chain,
  the first certificate is treated as the leaf and the remaining ones are used as intermediates.

Output format
For each input certificate file, one line is printed:
<filename>: OK (expires YYYY-MM-DD)
<filename>: INVALID

Special self-test mode
ca-signed selftest

Runs a built-in test suite using embedded certificates. This mode requires no
external files or network access. The program exits with code 0 if all tests
pass, or a non-zero exit code if any test fails. Example output lines include
whether validation succeeds and the leaf’s expiration when applicable.

Examples
# Verify a server certificate against a root CA
ca-signed isrg-root-x1.pem le-e7.pem

# Run the embedded self-test suite
ca-signed selftest

Notes
- The tool attempts to parse certificates in PEM first, then falls back to
  DER/PKCS#7/PKCS#12 (with an empty password) where applicable.
- Expiration is shown for the leaf certificate only.
- In selftest mode, test certificates are compiled into the binary using go:embed.
cmd/ca-signed/main.go
Normal file
325
cmd/ca-signed/main.go
Normal file
@@ -0,0 +1,325 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/x509"
|
||||||
|
"embed"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||||
|
)
|
||||||
|
|
||||||
|
// loadCertsFromFile attempts to parse certificates from a file that may be in
|
||||||
|
// PEM or DER/PKCS#7 format. Returns the parsed certificates or an error.
|
||||||
|
func loadCertsFromFile(path string) ([]*x509.Certificate, error) {
|
||||||
|
var certs []*x509.Certificate
|
||||||
|
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if certs, err = certlib.ParseCertificatesPEM(data); err == nil {
|
||||||
|
return certs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if certs, _, err = certlib.ParseCertificatesDER(data, ""); err == nil {
|
||||||
|
return certs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func makePoolFromFile(path string) (*x509.CertPool, error) {
|
||||||
|
// Try PEM via helper (it builds a pool)
|
||||||
|
if pool, err := certlib.LoadPEMCertPool(path); err == nil && pool != nil {
|
||||||
|
return pool, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: read as DER(s), add to a new pool
|
||||||
|
certs, err := loadCertsFromFile(path)
|
||||||
|
if err != nil || len(certs) == 0 {
|
||||||
|
return nil, fmt.Errorf("failed to load CA certificates from %s", path)
|
||||||
|
}
|
||||||
|
pool := x509.NewCertPool()
|
||||||
|
for _, c := range certs {
|
||||||
|
pool.AddCert(c)
|
||||||
|
}
|
||||||
|
return pool, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:embed testdata/*.pem
|
||||||
|
var embeddedTestdata embed.FS
|
||||||
|
|
||||||
|
// loadCertsFromBytes attempts to parse certificates from bytes that may be in
|
||||||
|
// PEM or DER/PKCS#7 format.
|
||||||
|
func loadCertsFromBytes(data []byte) ([]*x509.Certificate, error) {
|
||||||
|
certs, err := certlib.ParseCertificatesPEM(data)
|
||||||
|
if err == nil {
|
||||||
|
return certs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
certs, _, err = certlib.ParseCertificatesDER(data, "")
|
||||||
|
if err == nil {
|
||||||
|
return certs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func makePoolFromBytes(data []byte) (*x509.CertPool, error) {
|
||||||
|
certs, err := loadCertsFromBytes(data)
|
||||||
|
if err != nil || len(certs) == 0 {
|
||||||
|
return nil, errors.New("failed to load CA certificates from embedded bytes")
|
||||||
|
}
|
||||||
|
pool := x509.NewCertPool()
|
||||||
|
for _, c := range certs {
|
||||||
|
pool.AddCert(c)
|
||||||
|
}
|
||||||
|
return pool, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isSelfSigned returns true if the given certificate is self-signed.
|
||||||
|
// It checks that the subject and issuer match and that the certificate's
|
||||||
|
// signature verifies against its own public key.
|
||||||
|
func isSelfSigned(cert *x509.Certificate) bool {
|
||||||
|
if cert == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Quick check: subject and issuer match
|
||||||
|
if cert.Subject.String() != cert.Issuer.String() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Cryptographic check: the certificate is signed by itself
|
||||||
|
if err := cert.CheckSignatureFrom(cert); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyAgainstCA(caPool *x509.CertPool, path string) (bool, string) {
|
||||||
|
certs, err := loadCertsFromFile(path)
|
||||||
|
if err != nil || len(certs) == 0 {
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
leaf := certs[0]
|
||||||
|
ints := x509.NewCertPool()
|
||||||
|
if len(certs) > 1 {
|
||||||
|
for _, ic := range certs[1:] {
|
||||||
|
ints.AddCert(ic)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := x509.VerifyOptions{
|
||||||
|
Roots: caPool,
|
||||||
|
Intermediates: ints,
|
||||||
|
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
||||||
|
}
|
||||||
|
if _, err = leaf.Verify(opts); err != nil {
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, leaf.NotAfter.Format("2006-01-02")
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (bool, string) {
|
||||||
|
certs, err := loadCertsFromBytes(certData)
|
||||||
|
if err != nil || len(certs) == 0 {
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
leaf := certs[0]
|
||||||
|
ints := x509.NewCertPool()
|
||||||
|
if len(certs) > 1 {
|
||||||
|
for _, ic := range certs[1:] {
|
||||||
|
ints.AddCert(ic)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := x509.VerifyOptions{
|
||||||
|
Roots: caPool,
|
||||||
|
Intermediates: ints,
|
||||||
|
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
||||||
|
}
|
||||||
|
if _, err = leaf.Verify(opts); err != nil {
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, leaf.NotAfter.Format("2006-01-02")
|
||||||
|
}
|
||||||
|
|
||||||
|
type testCase struct {
|
||||||
|
name string
|
||||||
|
caFile string
|
||||||
|
certFile string
|
||||||
|
expectOK bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tc testCase) Run() error {
|
||||||
|
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.caFile, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.certFile, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pool, err := makePoolFromBytes(caBytes)
|
||||||
|
if err != nil || pool == nil {
|
||||||
|
return fmt.Errorf("selftest: failed to build CA pool for %s: %w", tc.caFile, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ok, exp := verifyAgainstCABytes(pool, certBytes)
|
||||||
|
if ok != tc.expectOK {
|
||||||
|
return fmt.Errorf("%s: unexpected result: got %v, want %v", tc.name, ok, tc.expectOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok {
|
||||||
|
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("%s: INVALID (as expected)\n", tc.name)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cases = []testCase{
|
||||||
|
{
|
||||||
|
name: "ISRG Root X1 validates LE E7",
|
||||||
|
caFile: "testdata/isrg-root-x1.pem",
|
||||||
|
certFile: "testdata/le-e7.pem",
|
||||||
|
expectOK: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ISRG Root X1 does NOT validate Google WR2",
|
||||||
|
caFile: "testdata/isrg-root-x1.pem",
|
||||||
|
certFile: "testdata/goog-wr2.pem",
|
||||||
|
expectOK: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "GTS R1 validates Google WR2",
|
||||||
|
caFile: "testdata/gts-r1.pem",
|
||||||
|
certFile: "testdata/goog-wr2.pem",
|
||||||
|
expectOK: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "GTS R1 does NOT validate LE E7",
|
||||||
|
caFile: "testdata/gts-r1.pem",
|
||||||
|
certFile: "testdata/le-e7.pem",
|
||||||
|
expectOK: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// selftest runs built-in validation using embedded certificates.
|
||||||
|
func selftest() int {
|
||||||
|
failures := 0
|
||||||
|
for _, tc := range cases {
|
||||||
|
err := tc.Run()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
failures++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that both embedded root CAs are detected as self-signed
|
||||||
|
roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
|
||||||
|
for _, root := range roots {
|
||||||
|
b, err := embeddedTestdata.ReadFile(root)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
|
||||||
|
failures++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
certs, err := loadCertsFromBytes(b)
|
||||||
|
if err != nil || len(certs) == 0 {
|
||||||
|
fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
|
||||||
|
failures++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
leaf := certs[0]
|
||||||
|
if isSelfSigned(leaf) {
|
||||||
|
fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
|
||||||
|
failures++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if failures == 0 {
|
||||||
|
fmt.Println("selftest: PASS")
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// expiryString returns a YYYY-MM-DD date string to display for certificate
|
||||||
|
// expiry. If an explicit exp string is provided, it is used. Otherwise, if a
|
||||||
|
// leaf certificate is available, its NotAfter is formatted. As a last resort,
|
||||||
|
// it falls back to today's date (should not normally happen).
|
||||||
|
func expiryString(leaf *x509.Certificate, exp string) string {
|
||||||
|
if exp != "" {
|
||||||
|
return exp
|
||||||
|
}
|
||||||
|
if leaf != nil {
|
||||||
|
return leaf.NotAfter.Format("2006-01-02")
|
||||||
|
}
|
||||||
|
return time.Now().Format("2006-01-02")
|
||||||
|
}
|
||||||
|
|
||||||
|
// processCert verifies a single certificate file against the provided CA pool
|
||||||
|
// and prints the result in the required format, handling self-signed
|
||||||
|
// certificates specially.
|
||||||
|
func processCert(caPool *x509.CertPool, certPath string) {
|
||||||
|
ok, exp := verifyAgainstCA(caPool, certPath)
|
||||||
|
name := filepath.Base(certPath)
|
||||||
|
|
||||||
|
// Try to load the leaf cert for self-signed detection and expiry fallback
|
||||||
|
var leaf *x509.Certificate
|
||||||
|
if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
|
||||||
|
leaf = certs[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prefer the SELF-SIGNED label if applicable
|
||||||
|
if isSelfSigned(leaf) {
|
||||||
|
fmt.Printf("%s: SELF-SIGNED\n", name)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok {
|
||||||
|
fmt.Printf("%s: OK (expires %s)\n", name, expiryString(leaf, exp))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Printf("%s: INVALID\n", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Special selftest mode: single argument "selftest"
|
||||||
|
if len(os.Args) == 2 && os.Args[1] == "selftest" {
|
||||||
|
os.Exit(selftest())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(os.Args) < 3 {
|
||||||
|
prog := filepath.Base(os.Args[0])
|
||||||
|
fmt.Fprintf(os.Stderr, "Usage:\n %s ca.pem cert1.pem cert2.pem ...\n %s selftest\n", prog, prog)
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
caPath := os.Args[1]
|
||||||
|
caPool, err := makePoolFromFile(caPath)
|
||||||
|
if err != nil || caPool == nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "failed to load CA certificate(s): %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, certPath := range os.Args[2:] {
|
||||||
|
processCert(caPool, certPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
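A hypothetical unit test (not part of this change) could exercise the embedded self-test directly, since selftest() in the listing above returns 0 on success:

```go
// Hypothetical test file; it relies only on selftest() defined in main.go above.
package main

import "testing"

func TestSelftest(t *testing.T) {
	if code := selftest(); code != 0 {
		t.Fatalf("selftest reported failure exit code %d", code)
	}
}
```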
29
cmd/ca-signed/testdata/goog-wr2.pem
vendored
Normal file
29
cmd/ca-signed/testdata/goog-wr2.pem
vendored
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIFCzCCAvOgAwIBAgIQf/AFoHxM3tEArZ1mpRB7mDANBgkqhkiG9w0BAQsFADBH
|
||||||
|
MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
|
||||||
|
QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMjMxMjEzMDkwMDAwWhcNMjkwMjIw
|
||||||
|
MTQwMDAwWjA7MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVR29vZ2xlIFRydXN0IFNl
|
||||||
|
cnZpY2VzMQwwCgYDVQQDEwNXUjIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||||
|
AoIBAQCp/5x/RR5wqFOfytnlDd5GV1d9vI+aWqxG8YSau5HbyfsvAfuSCQAWXqAc
|
||||||
|
+MGr+XgvSszYhaLYWTwO0xj7sfUkDSbutltkdnwUxy96zqhMt/TZCPzfhyM1IKji
|
||||||
|
aeKMTj+xWfpgoh6zySBTGYLKNlNtYE3pAJH8do1cCA8Kwtzxc2vFE24KT3rC8gIc
|
||||||
|
LrRjg9ox9i11MLL7q8Ju26nADrn5Z9TDJVd06wW06Y613ijNzHoU5HEDy01hLmFX
|
||||||
|
xRmpC5iEGuh5KdmyjS//V2pm4M6rlagplmNwEmceOuHbsCFx13ye/aoXbv4r+zgX
|
||||||
|
FNFmp6+atXDMyGOBOozAKql2N87jAgMBAAGjgf4wgfswDgYDVR0PAQH/BAQDAgGG
|
||||||
|
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/
|
||||||
|
AgEAMB0GA1UdDgQWBBTeGx7teRXUPjckwyG77DQ5bUKyMDAfBgNVHSMEGDAWgBTk
|
||||||
|
rysmcRorSCeFL1JmLO/wiRNxPjA0BggrBgEFBQcBAQQoMCYwJAYIKwYBBQUHMAKG
|
||||||
|
GGh0dHA6Ly9pLnBraS5nb29nL3IxLmNydDArBgNVHR8EJDAiMCCgHqAchhpodHRw
|
||||||
|
Oi8vYy5wa2kuZ29vZy9yL3IxLmNybDATBgNVHSAEDDAKMAgGBmeBDAECATANBgkq
|
||||||
|
hkiG9w0BAQsFAAOCAgEARXWL5R87RBOWGqtY8TXJbz3S0DNKhjO6V1FP7sQ02hYS
|
||||||
|
TL8Tnw3UVOlIecAwPJQl8hr0ujKUtjNyC4XuCRElNJThb0Lbgpt7fyqaqf9/qdLe
|
||||||
|
SiDLs/sDA7j4BwXaWZIvGEaYzq9yviQmsR4ATb0IrZNBRAq7x9UBhb+TV+PfdBJT
|
||||||
|
DhEl05vc3ssnbrPCuTNiOcLgNeFbpwkuGcuRKnZc8d/KI4RApW//mkHgte8y0YWu
|
||||||
|
ryUJ8GLFbsLIbjL9uNrizkqRSvOFVU6xddZIMy9vhNkSXJ/UcZhjJY1pXAprffJB
|
||||||
|
vei7j+Qi151lRehMCofa6WBmiA4fx+FOVsV2/7R6V2nyAiIJJkEd2nSi5SnzxJrl
|
||||||
|
Xdaqev3htytmOPvoKWa676ATL/hzfvDaQBEcXd2Ppvy+275W+DKcH0FBbX62xevG
|
||||||
|
iza3F4ydzxl6NJ8hk8R+dDXSqv1MbRT1ybB5W0k8878XSOjvmiYTDIfyc9acxVJr
|
||||||
|
Y/cykHipa+te1pOhv7wYPYtZ9orGBV5SGOJm4NrB3K1aJar0RfzxC3ikr7Dyc6Qw
|
||||||
|
qDTBU39CluVIQeuQRgwG3MuSxl7zRERDRilGoKb8uY45JzmxWuKxrfwT/478JuHU
|
||||||
|
/oTxUFqOl2stKnn7QGTq8z29W+GgBLCXSBxC9epaHM0myFH/FJlniXJfHeytWt0=
|
||||||
|
-----END CERTIFICATE-----
|
||||||
31
cmd/ca-signed/testdata/gts-r1.pem
vendored
Normal file
31
cmd/ca-signed/testdata/gts-r1.pem
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
|
||||||
|
CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
|
||||||
|
MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
|
||||||
|
MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
|
||||||
|
Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
|
||||||
|
A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
|
||||||
|
27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
|
||||||
|
Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
|
||||||
|
TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
|
||||||
|
qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
|
||||||
|
szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
|
||||||
|
Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
|
||||||
|
MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
|
||||||
|
wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
|
||||||
|
aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
|
||||||
|
VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
|
||||||
|
AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
|
||||||
|
FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
|
||||||
|
C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
|
||||||
|
QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
|
||||||
|
h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
|
||||||
|
7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
|
||||||
|
ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
|
||||||
|
MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
|
||||||
|
Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
|
||||||
|
6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
|
||||||
|
0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
|
||||||
|
2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
|
||||||
|
bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
|
||||||
|
-----END CERTIFICATE-----
|
||||||
31
cmd/ca-signed/testdata/isrg-root-x1.pem
vendored
Normal file
31
cmd/ca-signed/testdata/isrg-root-x1.pem
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
||||||
|
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
||||||
|
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
||||||
|
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
||||||
|
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
||||||
|
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
||||||
|
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
||||||
|
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
||||||
|
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
||||||
|
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
||||||
|
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
||||||
|
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
||||||
|
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
||||||
|
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
||||||
|
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
||||||
|
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
||||||
|
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
||||||
|
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
||||||
|
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
||||||
|
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
||||||
|
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
||||||
|
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
||||||
|
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
||||||
|
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
||||||
|
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
||||||
|
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
||||||
|
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
||||||
|
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
||||||
|
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
||||||
|
-----END CERTIFICATE-----
|
||||||
26
cmd/ca-signed/testdata/le-e7.pem
vendored
Normal file
26
cmd/ca-signed/testdata/le-e7.pem
vendored
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIEVzCCAj+gAwIBAgIRAKp18eYrjwoiCWbTi7/UuqEwDQYJKoZIhvcNAQELBQAw
|
||||||
|
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
||||||
|
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjQwMzEzMDAwMDAw
|
||||||
|
WhcNMjcwMzEyMjM1OTU5WjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
|
||||||
|
RW5jcnlwdDELMAkGA1UEAxMCRTcwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARB6AST
|
||||||
|
CFh/vjcwDMCgQer+VtqEkz7JANurZxLP+U9TCeioL6sp5Z8VRvRbYk4P1INBmbef
|
||||||
|
QHJFHCxcSjKmwtvGBWpl/9ra8HW0QDsUaJW2qOJqceJ0ZVFT3hbUHifBM/2jgfgw
|
||||||
|
gfUwDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD
|
||||||
|
ATASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSuSJ7chx1EoG/aouVgdAR4
|
||||||
|
wpwAgDAfBgNVHSMEGDAWgBR5tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcB
|
||||||
|
AQQmMCQwIgYIKwYBBQUHMAKGFmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0g
|
||||||
|
BAwwCjAIBgZngQwBAgEwJwYDVR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVu
|
||||||
|
Y3Iub3JnLzANBgkqhkiG9w0BAQsFAAOCAgEAjx66fDdLk5ywFn3CzA1w1qfylHUD
|
||||||
|
aEf0QZpXcJseddJGSfbUUOvbNR9N/QQ16K1lXl4VFyhmGXDT5Kdfcr0RvIIVrNxF
|
||||||
|
h4lqHtRRCP6RBRstqbZ2zURgqakn/Xip0iaQL0IdfHBZr396FgknniRYFckKORPG
|
||||||
|
yM3QKnd66gtMst8I5nkRQlAg/Jb+Gc3egIvuGKWboE1G89NTsN9LTDD3PLj0dUMr
|
||||||
|
OIuqVjLB8pEC6yk9enrlrqjXQgkLEYhXzq7dLafv5Vkig6Gl0nuuqjqfp0Q1bi1o
|
||||||
|
yVNAlXe6aUXw92CcghC9bNsKEO1+M52YY5+ofIXlS/SEQbvVYYBLZ5yeiglV6t3S
|
||||||
|
M6H+vTG0aP9YHzLn/KVOHzGQfXDP7qM5tkf+7diZe7o2fw6O7IvN6fsQXEQQj8TJ
|
||||||
|
UXJxv2/uJhcuy/tSDgXwHM8Uk34WNbRT7zGTGkQRX0gsbjAea/jYAoWv0ZvQRwpq
|
||||||
|
Pe79D/i7Cep8qWnA+7AE/3B3S/3dEEYmc0lpe1366A/6GEgk3ktr9PEoQrLChs6I
|
||||||
|
tu3wnNLB2euC8IKGLQFpGtOO/2/hiAKjyajaBP25w1jF0Wl8Bbqne3uZ2q1GyPFJ
|
||||||
|
YRmT7/OXpmOH/FVLtwS+8ng1cAmpCujPwteJZNcDG0sF2n/sc0+SQf49fdyUK0ty
|
||||||
|
+VUwFj9tmWxyR/M=
|
||||||
|
-----END CERTIFICATE-----
|
||||||
148
cmd/cert-bundler/README.txt
Normal file
148
cmd/cert-bundler/README.txt
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
cert-bundler: create certificate chain archives
|
||||||
|
------------------------------------------------
|
||||||
|
|
||||||
|
Description
|
||||||
|
cert-bundler creates archives of certificate chains from a YAML configuration
|
||||||
|
file. It validates certificates, checks expiration dates, and generates
|
||||||
|
archives in multiple formats (zip, tar.gz) with optional manifest files
|
||||||
|
containing SHA256 checksums.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
cert-bundler [options]
|
||||||
|
|
||||||
|
Options:
|
||||||
|
-c <file> Path to YAML configuration file (default: bundle.yaml)
|
||||||
|
-o <dir> Output directory for archives (default: pkg)
|
||||||
|
|
||||||
|
YAML Configuration Format
|
||||||
|
|
||||||
|
The configuration file uses the following structure:
|
||||||
|
|
||||||
|
config:
|
||||||
|
hashes: <filename>
|
||||||
|
expiry: <duration>
|
||||||
|
chains:
|
||||||
|
<group_name>:
|
||||||
|
certs:
|
||||||
|
- root: <path>
|
||||||
|
intermediates:
|
||||||
|
- <path>
|
||||||
|
- <path>
|
||||||
|
- root: <path>
|
||||||
|
intermediates:
|
||||||
|
- <path>
|
||||||
|
outputs:
|
||||||
|
include_single: <bool>
|
||||||
|
include_individual: <bool>
|
||||||
|
manifest: <bool>
|
||||||
|
encoding: <encoding>
|
||||||
|
formats:
|
||||||
|
- <format>
|
||||||
|
- <format>
|
||||||
|
|
||||||
|
Configuration Fields
|
||||||
|
|
||||||
|
config:
|
||||||
|
hashes: (optional) Name of the file to write SHA256 checksums of all
|
||||||
|
generated archives. If omitted, no hash file is created.
|
||||||
|
expiry: (optional) Expiration warning threshold. Certificates expiring
|
||||||
|
within this period will trigger a warning. Supports formats like
|
||||||
|
"1y" (year), "6m" (month), "30d" (day). Default: 1y
|
||||||
|
|
||||||
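The expiry values above ("1y", "6m", "30d") are handled by parseDuration in cmd/cert-bundler/main.go further down; a minimal standalone sketch of the same idea, for readers skimming the README:

```go
// Sketch of parsing the expiry suffixes described above; parseDuration in
// main.go below is the implementation actually used by cert-bundler.
package expiry

import (
	"fmt"
	"strconv"
	"time"
)

func Parse(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("invalid duration format: %q", s)
	}

	n, err := strconv.Atoi(s[:len(s)-1])
	if err != nil {
		return 0, fmt.Errorf("invalid duration value: %q", s)
	}

	day := 24 * time.Hour
	switch s[len(s)-1] {
	case 'y', 'Y':
		return time.Duration(n) * 365 * day, nil
	case 'm', 'M':
		return time.Duration(n) * 30 * day, nil
	case 'd', 'D':
		return time.Duration(n) * day, nil
	default:
		// Fall back to Go's own duration syntax, e.g. "720h".
		return time.ParseDuration(s)
	}
}
```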
|
chains:
|
||||||
|
Each key under "chains" defines a named certificate group. All certificates
|
||||||
|
in a group are bundled together into archives with the group name.
|
||||||
|
|
||||||
|
certs:
|
||||||
|
List of certificate chains. Each chain has:
|
||||||
|
root: Path to root CA certificate (PEM or DER format)
|
||||||
|
intermediates: List of paths to intermediate certificates
|
||||||
|
|
||||||
|
All intermediates are validated against their root CA. An error is
|
||||||
|
reported if signature verification fails.
|
||||||
|
|
||||||
|
outputs:
|
||||||
|
Defines output formats and content for the group's archives:
|
||||||
|
|
||||||
|
include_single: (bool) If true, all certificates in the group are
|
||||||
|
concatenated into a single file named "bundle.pem"
|
||||||
|
(or "bundle.crt" for DER encoding).
|
||||||
|
|
||||||
|
include_individual: (bool) If true, each certificate is included as
|
||||||
|
a separate file in the archive, named after the
|
||||||
|
original file (e.g., "int/cca2.pem" becomes
|
||||||
|
"cca2.pem").
|
||||||
|
|
||||||
|
manifest: (bool) If true, a MANIFEST file is included containing
|
||||||
|
SHA256 checksums of all files in the archive.
|
||||||
|
|
||||||
|
encoding: Specifies certificate encoding in the archive:
|
||||||
|
- "pem": PEM format with .pem extension (default)
|
||||||
|
- "der": DER format with .crt extension
|
||||||
|
- "both": Both PEM and DER versions are included
|
||||||
|
|
||||||
|
formats: List of archive formats to generate:
|
||||||
|
- "zip": Creates a .zip archive
|
||||||
|
- "tgz": Creates a .tar.gz archive
|
||||||
|
|
||||||
|
Output Files
|
||||||
|
|
||||||
|
For each group and format combination, an archive is created:
|
||||||
|
<group_name>.zip or <group_name>.tar.gz
|
||||||
|
|
||||||
|
If config.hashes is specified, a hash file is created in the output directory
|
||||||
|
containing SHA256 checksums of all generated archives.
|
||||||
|
|
||||||
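Generating that hash file amounts to hashing each archive and writing one line per file; generateHashFile in main.go below is the real implementation, and a minimal sketch of the idea looks like this:

```go
// Sketch of writing SHA256 checksums for a list of archives; mirrors
// generateHashFile in cmd/cert-bundler/main.go below.
package bundlehash

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

func write(out io.Writer, archives []string) error {
	for _, name := range archives {
		data, err := os.ReadFile(name)
		if err != nil {
			return err
		}

		sum := sha256.Sum256(data)
		// One "<hex digest> <basename>" line per archive.
		if _, err := fmt.Fprintf(out, "%x %s\n", sum, filepath.Base(name)); err != nil {
			return err
		}
	}

	return nil
}
```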
|
Example Configuration
|
||||||
|
|
||||||
|
config:
|
||||||
|
hashes: bundle.sha256
|
||||||
|
expiry: 1y
|
||||||
|
chains:
|
||||||
|
core_certs:
|
||||||
|
certs:
|
||||||
|
- root: roots/core-ca.pem
|
||||||
|
intermediates:
|
||||||
|
- int/cca1.pem
|
||||||
|
- int/cca2.pem
|
||||||
|
- int/cca3.pem
|
||||||
|
- root: roots/ssh-ca.pem
|
||||||
|
intermediates:
|
||||||
|
- ssh/ssh_dmz1.pem
|
||||||
|
- ssh/ssh_internal.pem
|
||||||
|
outputs:
|
||||||
|
include_single: true
|
||||||
|
include_individual: true
|
||||||
|
manifest: true
|
||||||
|
encoding: pem
|
||||||
|
formats:
|
||||||
|
- zip
|
||||||
|
- tgz
|
||||||
|
|
||||||
|
This configuration:
|
||||||
|
- Creates core_certs.zip and core_certs.tar.gz in the output directory
|
||||||
|
- Each archive contains bundle.pem (all certificates concatenated)
|
||||||
|
- Each archive contains individual certificates (core-ca.pem, cca1.pem, etc.)
|
||||||
|
- Each archive includes a MANIFEST file with SHA256 checksums
|
||||||
|
- Creates bundle.sha256 with checksums of the two archives
|
||||||
|
- Warns if any certificate expires within 1 year
|
||||||
|
|
||||||
|
Examples
|
||||||
|
|
||||||
|
# Create bundles using default configuration (bundle.yaml -> pkg/)
|
||||||
|
cert-bundler
|
||||||
|
|
||||||
|
# Use custom configuration and output directory
|
||||||
|
cert-bundler -c myconfig.yaml -o output
|
||||||
|
|
||||||
|
# Create bundles from testdata configuration
|
||||||
|
cert-bundler -c testdata/bundle.yaml -o testdata/pkg
|
||||||
|
|
||||||
|
Notes
|
||||||
|
- Certificate paths in the YAML are relative to the current working directory
|
||||||
|
- All intermediates must be properly signed by their specified root CA
|
||||||
|
- Certificates are checked for expiration; warnings are printed to stderr
|
||||||
|
- Expired certificates do not prevent archive creation but generate warnings
|
||||||
|
- Both PEM and DER certificate formats are supported as input
|
||||||
|
- Archive filenames use the group name, not individual chain names
|
||||||
|
- If both include_single and include_individual are true, archives contain both
|
||||||
575
cmd/cert-bundler/main.go
Normal file
575
cmd/cert-bundler/main.go
Normal file
@@ -0,0 +1,575 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"archive/zip"
|
||||||
|
"compress/gzip"
|
||||||
|
"crypto/sha256"
|
||||||
|
"crypto/x509"
|
||||||
|
_ "embed"
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config represents the top-level YAML configuration.
|
||||||
|
type Config struct {
|
||||||
|
Config struct {
|
||||||
|
Hashes string `yaml:"hashes"`
|
||||||
|
Expiry string `yaml:"expiry"`
|
||||||
|
} `yaml:"config"`
|
||||||
|
Chains map[string]ChainGroup `yaml:"chains"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChainGroup represents a named group of certificate chains.
|
||||||
|
type ChainGroup struct {
|
||||||
|
Certs []CertChain `yaml:"certs"`
|
||||||
|
Outputs Outputs `yaml:"outputs"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CertChain represents a root certificate and its intermediates.
|
||||||
|
type CertChain struct {
|
||||||
|
Root string `yaml:"root"`
|
||||||
|
Intermediates []string `yaml:"intermediates"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Outputs defines output format options.
|
||||||
|
type Outputs struct {
|
||||||
|
IncludeSingle bool `yaml:"include_single"`
|
||||||
|
IncludeIndividual bool `yaml:"include_individual"`
|
||||||
|
Manifest bool `yaml:"manifest"`
|
||||||
|
Formats []string `yaml:"formats"`
|
||||||
|
Encoding string `yaml:"encoding"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
configFile string
|
||||||
|
outputDir string
|
||||||
|
)
|
||||||
|
|
||||||
|
var formatExtensions = map[string]string{
|
||||||
|
"zip": ".zip",
|
||||||
|
"tgz": ".tar.gz",
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:embed README.txt
|
||||||
|
var readmeContent string
|
||||||
|
|
||||||
|
func usage() {
|
||||||
|
fmt.Fprint(os.Stderr, readmeContent)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Usage = usage
|
||||||
|
flag.StringVar(&configFile, "c", "bundle.yaml", "path to YAML configuration file")
|
||||||
|
flag.StringVar(&outputDir, "o", "pkg", "output directory for archives")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if configFile == "" {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error: configuration file required (-c flag)\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load and parse configuration
|
||||||
|
cfg, err := loadConfig(configFile)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error loading config: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse expiry duration (default 1 year)
|
||||||
|
expiryDuration := 365 * 24 * time.Hour
|
||||||
|
if cfg.Config.Expiry != "" {
|
||||||
|
expiryDuration, err = parseDuration(cfg.Config.Expiry)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error parsing expiry: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create output directory if it doesn't exist
|
||||||
|
err = os.MkdirAll(outputDir, 0750)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process each chain group
|
||||||
|
// Pre-allocate createdFiles based on total number of formats across all groups
|
||||||
|
totalFormats := 0
|
||||||
|
for _, group := range cfg.Chains {
|
||||||
|
totalFormats += len(group.Outputs.Formats)
|
||||||
|
}
|
||||||
|
createdFiles := make([]string, 0, totalFormats)
|
||||||
|
for groupName, group := range cfg.Chains {
|
||||||
|
files, perr := processChainGroup(groupName, group, expiryDuration)
|
||||||
|
if perr != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, perr)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
createdFiles = append(createdFiles, files...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate hash file for all created archives
|
||||||
|
if cfg.Config.Hashes != "" {
|
||||||
|
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
|
||||||
|
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", gerr)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("Certificate bundling completed successfully")
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadConfig(path string) (*Config, error) {
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var cfg Config
|
||||||
|
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
|
||||||
|
return nil, uerr
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cfg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseDuration(s string) (time.Duration, error) {
|
||||||
|
// Support simple formats like "1y", "6m", "30d"
|
||||||
|
if len(s) < 2 {
|
||||||
|
return 0, fmt.Errorf("invalid duration format: %s", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
unit := s[len(s)-1]
|
||||||
|
value := s[:len(s)-1]
|
||||||
|
|
||||||
|
var multiplier time.Duration
|
||||||
|
switch unit {
|
||||||
|
case 'y', 'Y':
|
||||||
|
multiplier = 365 * 24 * time.Hour
|
||||||
|
case 'm', 'M':
|
||||||
|
multiplier = 30 * 24 * time.Hour
|
||||||
|
case 'd', 'D':
|
||||||
|
multiplier = 24 * time.Hour
|
||||||
|
default:
|
||||||
|
return time.ParseDuration(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
var num int
|
||||||
|
_, err := fmt.Sscanf(value, "%d", &num)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("invalid duration value: %s", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return time.Duration(num) * multiplier, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func processChainGroup(groupName string, group ChainGroup, expiryDuration time.Duration) ([]string, error) {
|
||||||
|
// Default encoding to "pem" if not specified
|
||||||
|
encoding := group.Outputs.Encoding
|
||||||
|
if encoding == "" {
|
||||||
|
encoding = "pem"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect certificates from all chains in the group
|
||||||
|
singleFileCerts, individualCerts, err := loadAndCollectCerts(group.Certs, group.Outputs, expiryDuration)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepare files for inclusion in archives
|
||||||
|
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, group.Outputs, encoding)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create archives for the entire group
|
||||||
|
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return createdFiles, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
|
||||||
|
func loadAndCollectCerts(
|
||||||
|
chains []CertChain,
|
||||||
|
outputs Outputs,
|
||||||
|
expiryDuration time.Duration,
|
||||||
|
) ([]*x509.Certificate, []certWithPath, error) {
|
||||||
|
var singleFileCerts []*x509.Certificate
|
||||||
|
var individualCerts []certWithPath
|
||||||
|
|
||||||
|
for _, chain := range chains {
|
||||||
|
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
|
||||||
|
if cerr != nil {
|
||||||
|
return nil, nil, cerr
|
||||||
|
}
|
||||||
|
if len(s) > 0 {
|
||||||
|
singleFileCerts = append(singleFileCerts, s...)
|
||||||
|
}
|
||||||
|
if len(i) > 0 {
|
||||||
|
individualCerts = append(individualCerts, i...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return singleFileCerts, individualCerts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
|
||||||
|
func collectFromChain(
|
||||||
|
chain CertChain,
|
||||||
|
outputs Outputs,
|
||||||
|
expiryDuration time.Duration,
|
||||||
|
) (
|
||||||
|
[]*x509.Certificate,
|
||||||
|
[]certWithPath,
|
||||||
|
error,
|
||||||
|
) {
|
||||||
|
var single []*x509.Certificate
|
||||||
|
var indiv []certWithPath
|
||||||
|
|
||||||
|
// Load root certificate
|
||||||
|
rootCert, rerr := certlib.LoadCertificate(chain.Root)
|
||||||
|
if rerr != nil {
|
||||||
|
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check expiry for root
|
||||||
|
checkExpiry(chain.Root, rootCert, expiryDuration)
|
||||||
|
|
||||||
|
// Add root to collections if needed
|
||||||
|
if outputs.IncludeSingle {
|
||||||
|
single = append(single, rootCert)
|
||||||
|
}
|
||||||
|
if outputs.IncludeIndividual {
|
||||||
|
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load and validate intermediates
|
||||||
|
for _, intPath := range chain.Intermediates {
|
||||||
|
intCert, lerr := certlib.LoadCertificate(intPath)
|
||||||
|
if lerr != nil {
|
||||||
|
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate that intermediate is signed by root
|
||||||
|
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
|
||||||
|
return nil, nil, fmt.Errorf(
|
||||||
|
"intermediate %s is not properly signed by root %s: %w",
|
||||||
|
intPath,
|
||||||
|
chain.Root,
|
||||||
|
sigErr,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check expiry for intermediate
|
||||||
|
checkExpiry(intPath, intCert, expiryDuration)
|
||||||
|
|
||||||
|
// Add intermediate to collections if needed
|
||||||
|
if outputs.IncludeSingle {
|
||||||
|
single = append(single, intCert)
|
||||||
|
}
|
||||||
|
if outputs.IncludeIndividual {
|
||||||
|
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return single, indiv, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// prepareArchiveFiles prepares all files to be included in archives.
|
||||||
|
func prepareArchiveFiles(
|
||||||
|
singleFileCerts []*x509.Certificate,
|
||||||
|
individualCerts []certWithPath,
|
||||||
|
outputs Outputs,
|
||||||
|
encoding string,
|
||||||
|
) ([]fileEntry, error) {
|
||||||
|
var archiveFiles []fileEntry
|
||||||
|
|
||||||
|
// Handle a single bundle file
|
||||||
|
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
|
||||||
|
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
|
||||||
|
}
|
||||||
|
archiveFiles = append(archiveFiles, files...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle individual files
|
||||||
|
if outputs.IncludeIndividual {
|
||||||
|
for _, cp := range individualCerts {
|
||||||
|
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
|
||||||
|
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
|
||||||
|
}
|
||||||
|
archiveFiles = append(archiveFiles, files...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate manifest if requested
|
||||||
|
if outputs.Manifest {
|
||||||
|
manifestContent := generateManifest(archiveFiles)
|
||||||
|
archiveFiles = append(archiveFiles, fileEntry{
|
||||||
|
name: "MANIFEST",
|
||||||
|
content: manifestContent,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return archiveFiles, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// createArchiveFiles creates archive files in the specified formats.
|
||||||
|
func createArchiveFiles(groupName string, formats []string, archiveFiles []fileEntry) ([]string, error) {
|
||||||
|
createdFiles := make([]string, 0, len(formats))
|
||||||
|
|
||||||
|
for _, format := range formats {
|
||||||
|
ext, ok := formatExtensions[format]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||||
|
}
|
||||||
|
archivePath := filepath.Join(outputDir, groupName+ext)
|
||||||
|
switch format {
|
||||||
|
case "zip":
|
||||||
|
if err := createZipArchive(archivePath, archiveFiles); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create zip archive: %w", err)
|
||||||
|
}
|
||||||
|
case "tgz":
|
||||||
|
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||||
|
}
|
||||||
|
createdFiles = append(createdFiles, archivePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
return createdFiles, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
|
||||||
|
now := time.Now()
|
||||||
|
expiryThreshold := now.Add(expiryDuration)
|
||||||
|
|
||||||
|
if cert.NotAfter.Before(expiryThreshold) {
|
||||||
|
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
|
||||||
|
if daysUntilExpiry < 0 {
|
||||||
|
fmt.Fprintf(
|
||||||
|
os.Stderr,
|
||||||
|
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
|
||||||
|
path,
|
||||||
|
-daysUntilExpiry,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type fileEntry struct {
|
||||||
|
name string
|
||||||
|
content []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type certWithPath struct {
|
||||||
|
cert *x509.Certificate
|
||||||
|
path string
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeCertsToFiles converts certificates to file entries based on encoding type
|
||||||
|
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
|
||||||
|
func encodeCertsToFiles(
|
||||||
|
certs []*x509.Certificate,
|
||||||
|
baseName string,
|
||||||
|
encoding string,
|
||||||
|
isSingle bool,
|
||||||
|
) ([]fileEntry, error) {
|
||||||
|
var files []fileEntry
|
||||||
|
|
||||||
|
	switch encoding {
	case "pem":
		pemContent := encodeCertsToPEM(certs)
		files = append(files, fileEntry{
			name:    baseName + ".pem",
			content: pemContent,
		})
	case "der":
		if isSingle {
			// For single file in DER, concatenate all cert DER bytes
			var derContent []byte
			for _, cert := range certs {
				derContent = append(derContent, cert.Raw...)
			}
			files = append(files, fileEntry{
				name:    baseName + ".crt",
				content: derContent,
			})
		} else if len(certs) > 0 {
			// Individual DER file (should only have one cert)
			files = append(files, fileEntry{
				name:    baseName + ".crt",
				content: certs[0].Raw,
			})
		}
	case "both":
		// Add PEM version
		pemContent := encodeCertsToPEM(certs)
		files = append(files, fileEntry{
			name:    baseName + ".pem",
			content: pemContent,
		})
		// Add DER version
		if isSingle {
			var derContent []byte
			for _, cert := range certs {
				derContent = append(derContent, cert.Raw...)
			}
			files = append(files, fileEntry{
				name:    baseName + ".crt",
				content: derContent,
			})
		} else if len(certs) > 0 {
			files = append(files, fileEntry{
				name:    baseName + ".crt",
				content: certs[0].Raw,
			})
		}
	default:
		return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
	}

	return files, nil
}

// encodeCertsToPEM encodes certificates to PEM format.
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
	var pemContent []byte
	for _, cert := range certs {
		pemBlock := &pem.Block{
			Type:  "CERTIFICATE",
			Bytes: cert.Raw,
		}
		pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
	}
	return pemContent
}

func generateManifest(files []fileEntry) []byte {
	var manifest strings.Builder
	for _, file := range files {
		if file.name == "MANIFEST" {
			continue
		}
		hash := sha256.Sum256(file.content)
		manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
	}
	return []byte(manifest.String())
}

// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
func closeWithErr(baseErr error, closers ...io.Closer) error {
	for _, c := range closers {
		if c == nil {
			continue
		}
		if cerr := c.Close(); cerr != nil {
			baseErr = errors.Join(baseErr, cerr)
		}
	}
	return baseErr
}

func createZipArchive(path string, files []fileEntry) error {
	f, zerr := os.Create(path)
	if zerr != nil {
		return zerr
	}

	w := zip.NewWriter(f)

	for _, file := range files {
		fw, werr := w.Create(file.name)
		if werr != nil {
			return closeWithErr(werr, w, f)
		}
		if _, werr = fw.Write(file.content); werr != nil {
			return closeWithErr(werr, w, f)
		}
	}

	// Check errors on close operations
	if cerr := w.Close(); cerr != nil {
		_ = f.Close()
		return cerr
	}
	return f.Close()
}

func createTarGzArchive(path string, files []fileEntry) error {
	f, terr := os.Create(path)
	if terr != nil {
		return terr
	}

	gw := gzip.NewWriter(f)
	tw := tar.NewWriter(gw)

	for _, file := range files {
		hdr := &tar.Header{
			Name: file.name,
			Mode: 0644,
			Size: int64(len(file.content)),
		}
		if herr := tw.WriteHeader(hdr); herr != nil {
			return closeWithErr(herr, tw, gw, f)
		}
		if _, werr := tw.Write(file.content); werr != nil {
			return closeWithErr(werr, tw, gw, f)
		}
	}

	// Check errors on close operations in the correct order
	if cerr := tw.Close(); cerr != nil {
		_ = gw.Close()
		_ = f.Close()
		return cerr
	}
	if cerr := gw.Close(); cerr != nil {
		_ = f.Close()
		return cerr
	}
	return f.Close()
}

func generateHashFile(path string, files []string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	for _, file := range files {
		data, rerr := os.ReadFile(file)
		if rerr != nil {
			return rerr
		}

		hash := sha256.Sum256(data)
		fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
	}

	return nil
}
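The helpers above compose in a straightforward way: build the fileEntry slice for a chain group, append the generated MANIFEST, then hand the same slice to each archive writer. The following is only an editor's sketch of that flow, reusing the types and functions shown above; the function name, output paths, and the wantManifest flag are illustrative, not part of the tool's actual API.

```go
// buildArchives sketches how the pieces above fit together for one chain
// group named "core_certs". Paths and the wantManifest flag are hypothetical.
func buildArchives(files []fileEntry, outDir string, wantManifest bool) error {
	if wantManifest {
		files = append(files, fileEntry{
			name:    "MANIFEST",
			content: generateManifest(files),
		})
	}

	zipPath := filepath.Join(outDir, "core_certs.zip")
	tarPath := filepath.Join(outDir, "core_certs.tar.gz")

	if err := createZipArchive(zipPath, files); err != nil {
		return err
	}
	if err := createTarGzArchive(tarPath, files); err != nil {
		return err
	}

	// bundle.sha256 covers the archives that were just written.
	return generateHashFile(filepath.Join(outDir, "bundle.sha256"), []string{zipPath, tarPath})
}
```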
197  cmd/cert-bundler/prompt.txt  Normal file
@@ -0,0 +1,197 @@
This project is an exploration into the utility of Jetbrains' Junie
to write smaller but tedious programs.

Task: build a certificate bundling tool in cmd/cert-bundler. It
creates archives of certificate chains.

A YAML file for this looks something like:

``` yaml
config:
  hashes: bundle.sha256
  expiry: 1y
chains:
  core_certs:
    certs:
      - root: roots/core-ca.pem
        intermediates:
          - int/cca1.pem
          - int/cca2.pem
          - int/cca3.pem
      - root: roots/ssh-ca.pem
        intermediates:
          - ssh/ssh_dmz1.pem
          - ssh/ssh_internal.pem
    outputs:
      include_single: true
      include_individual: true
      manifest: true
      formats:
        - zip
        - tgz
```

Some requirements:

1. First, all the certificates should be loaded.
2. For each root, each of the individual intermediates should be
   checked to make sure they are properly signed by the root CA.
3. The program should optionally take an expiration period (defaulting
   to one year), specified in config.expiration, and if any certificate
   is within that expiration period, a warning should be printed.
4. If outputs.include_single is true, all certificates under chains
   should be concatenated into a single file.
5. If outputs.include_individual is true, all certificates under
   chains should be included at the root level (e.g. int/cca2.pem
   would be cca2.pem in the archive).
6. If bundle.manifest is true, a "MANIFEST" file is created with
   SHA256 sums of each file included in the archive.
7. For each of the formats, create an archive file in the output
   directory (specified with `-o`) with that format.
   - If zip is included, create a .zip file.
   - If tgz is included, create a .tar.gz file with default compression
     levels.
   - All archive files should include any generated files (single
     and/or individual) in the top-level directory.
8. In the output directory, create a file with the same name as
   config.hashes that contains the SHA256 sum of all files created.

-----

The outputs.include_single and outputs.include_individual describe
what should go in the final archive. If both are specified, the output
archive should include both a single bundle.pem and each individual
certificate, for example.

-----

As it stands, given the following `bundle.yaml`:

``` yaml
config:
  hashes: bundle.sha256
  expiry: 1y
chains:
  core_certs:
    certs:
      - root: pems/gts-r1.pem
        intermediates:
          - pems/goog-wr2.pem
        outputs:
          include_single: true
          include_individual: true
          manifest: true
          formats:
            - zip
            - tgz
      - root: pems/isrg-root-x1.pem
        intermediates:
          - pems/le-e7.pem
        outputs:
          include_single: true
          include_individual: false
          manifest: true
          formats:
            - zip
            - tgz
  google_certs:
    certs:
      - root: pems/gts-r1.pem
        intermediates:
          - pems/goog-wr2.pem
    outputs:
      include_single: true
      include_individual: false
      manifest: true
      formats:
        - tgz
  lets_encrypt:
    certs:
      - root: pems/isrg-root-x1.pem
        intermediates:
          - pems/le-e7.pem
    outputs:
      include_single: false
      include_individual: true
      manifest: false
      formats:
        - zip
```

The program outputs the following files:

- bundle.sha256
- core_certs_0.tgz (contains individual certs)
- core_certs_0.zip (contains individual certs)
- core_certs_1.tgz (contains core_certs.pem)
- core_certs_1.zip (contains core_certs.pem)
- google_certs_0.tgz
- lets_encrypt_0.zip

It should output

- bundle.sha256
- core_certs.tgz
- core_certs.zip
- google_certs.tgz
- lets_encrypt.zip

core_certs.* should contain `bundle.pem` and all the individual
certs. There should be no _$n$ variants of archives.

-----

Add an additional field to outputs: encoding. It should accept one of
`der`, `pem`, or `both`. If `der`, certificates should be output as a
`.crt` file containing a DER-encoded certificate. If `pem`, certificates
should be output as a `.pem` file containing a PEM-encoded certificate.
If both, both the `.crt` and `.pem` certificate should be included.

For example, given the previous config, if `encoding` is der, the
google_certs.tgz archive should contain

- bundle.crt
- MANIFEST

Or with lets_encrypt.zip:

- isrg-root-x1.crt
- le-e7.crt

However, if `encoding` is pem, the lets_encrypt.zip archive should contain:

- isrg-root-x1.pem
- le-e7.pem

And if `encoding` is both, the lets_encrypt.zip archive should contain:

- isrg-root-x1.crt
- isrg-root-x1.pem
- le-e7.crt
- le-e7.pem

-----

The tgz format should output a `.tar.gz` file instead of a `.tgz` file.

-----

Move the format extensions to a global variable.

-----

Write a README.txt with a description of the bundle.yaml format.

Additionally, update the help text for the program (e.g. with `-h`)
to provide the same detailed information.

-----

It may be easier to embed the README.txt in the program on build.

-----

For the archive (tar.gz and zip) writers, make sure errors are
checked at the end, and don't just defer the close operations.
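Requirements 2 and 3 in the prompt above (verify each intermediate against its root, and warn on impending expiry) map onto the standard library fairly directly. The following is an editor's sketch only, not the tool's actual implementation; the function name and the warning format are assumptions.

```go
package main

import (
	"crypto/x509"
	"fmt"
	"time"
)

// checkChain sketches prompt requirements 2 and 3: every intermediate must be
// signed by the root, and anything expiring within the warning window is
// reported. A one-year window mirrors the prompt's default expiry of 1y.
func checkChain(root *x509.Certificate, intermediates []*x509.Certificate, warnWithin time.Duration) error {
	deadline := time.Now().Add(warnWithin)
	for _, ic := range intermediates {
		if err := ic.CheckSignatureFrom(root); err != nil {
			return fmt.Errorf("%s is not signed by %s: %w", ic.Subject, root.Subject, err)
		}
		if ic.NotAfter.Before(deadline) {
			fmt.Printf("warning: %s expires at %s\n", ic.Subject, ic.NotAfter)
		}
	}
	return nil
}
```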
43  cmd/cert-bundler/testdata/bundle.yaml  vendored  Normal file
@@ -0,0 +1,43 @@
config:
  hashes: bundle.sha256
  expiry: 1y
chains:
  core_certs:
    certs:
      - root: pems/gts-r1.pem
        intermediates:
          - pems/goog-wr2.pem
      - root: pems/isrg-root-x1.pem
        intermediates:
          - pems/le-e7.pem
    outputs:
      include_single: true
      include_individual: true
      manifest: true
      formats:
        - zip
        - tgz
  google_certs:
    certs:
      - root: pems/gts-r1.pem
        intermediates:
          - pems/goog-wr2.pem
    outputs:
      include_single: true
      include_individual: false
      manifest: true
      encoding: der
      formats:
        - tgz
  lets_encrypt:
    certs:
      - root: pems/isrg-root-x1.pem
        intermediates:
          - pems/le-e7.pem
    outputs:
      include_single: false
      include_individual: true
      manifest: false
      encoding: both
      formats:
        - zip
29  cmd/cert-bundler/testdata/pems/goog-wr2.pem  vendored  Normal file
@@ -0,0 +1,29 @@
-----BEGIN CERTIFICATE-----
MIIFCzCCAvOgAwIBAgIQf/AFoHxM3tEArZ1mpRB7mDANBgkqhkiG9w0BAQsFADBH
MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMjMxMjEzMDkwMDAwWhcNMjkwMjIw
MTQwMDAwWjA7MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVR29vZ2xlIFRydXN0IFNl
cnZpY2VzMQwwCgYDVQQDEwNXUjIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQCp/5x/RR5wqFOfytnlDd5GV1d9vI+aWqxG8YSau5HbyfsvAfuSCQAWXqAc
+MGr+XgvSszYhaLYWTwO0xj7sfUkDSbutltkdnwUxy96zqhMt/TZCPzfhyM1IKji
aeKMTj+xWfpgoh6zySBTGYLKNlNtYE3pAJH8do1cCA8Kwtzxc2vFE24KT3rC8gIc
LrRjg9ox9i11MLL7q8Ju26nADrn5Z9TDJVd06wW06Y613ijNzHoU5HEDy01hLmFX
xRmpC5iEGuh5KdmyjS//V2pm4M6rlagplmNwEmceOuHbsCFx13ye/aoXbv4r+zgX
FNFmp6+atXDMyGOBOozAKql2N87jAgMBAAGjgf4wgfswDgYDVR0PAQH/BAQDAgGG
MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/
AgEAMB0GA1UdDgQWBBTeGx7teRXUPjckwyG77DQ5bUKyMDAfBgNVHSMEGDAWgBTk
rysmcRorSCeFL1JmLO/wiRNxPjA0BggrBgEFBQcBAQQoMCYwJAYIKwYBBQUHMAKG
GGh0dHA6Ly9pLnBraS5nb29nL3IxLmNydDArBgNVHR8EJDAiMCCgHqAchhpodHRw
Oi8vYy5wa2kuZ29vZy9yL3IxLmNybDATBgNVHSAEDDAKMAgGBmeBDAECATANBgkq
hkiG9w0BAQsFAAOCAgEARXWL5R87RBOWGqtY8TXJbz3S0DNKhjO6V1FP7sQ02hYS
TL8Tnw3UVOlIecAwPJQl8hr0ujKUtjNyC4XuCRElNJThb0Lbgpt7fyqaqf9/qdLe
SiDLs/sDA7j4BwXaWZIvGEaYzq9yviQmsR4ATb0IrZNBRAq7x9UBhb+TV+PfdBJT
DhEl05vc3ssnbrPCuTNiOcLgNeFbpwkuGcuRKnZc8d/KI4RApW//mkHgte8y0YWu
ryUJ8GLFbsLIbjL9uNrizkqRSvOFVU6xddZIMy9vhNkSXJ/UcZhjJY1pXAprffJB
vei7j+Qi151lRehMCofa6WBmiA4fx+FOVsV2/7R6V2nyAiIJJkEd2nSi5SnzxJrl
Xdaqev3htytmOPvoKWa676ATL/hzfvDaQBEcXd2Ppvy+275W+DKcH0FBbX62xevG
iza3F4ydzxl6NJ8hk8R+dDXSqv1MbRT1ybB5W0k8878XSOjvmiYTDIfyc9acxVJr
Y/cykHipa+te1pOhv7wYPYtZ9orGBV5SGOJm4NrB3K1aJar0RfzxC3ikr7Dyc6Qw
qDTBU39CluVIQeuQRgwG3MuSxl7zRERDRilGoKb8uY45JzmxWuKxrfwT/478JuHU
/oTxUFqOl2stKnn7QGTq8z29W+GgBLCXSBxC9epaHM0myFH/FJlniXJfHeytWt0=
-----END CERTIFICATE-----
31  cmd/cert-bundler/testdata/pems/gts-r1.pem  vendored  Normal file
@@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
-----END CERTIFICATE-----
31  cmd/cert-bundler/testdata/pems/isrg-root-x1.pem  vendored  Normal file
@@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----
26  cmd/cert-bundler/testdata/pems/le-e7.pem  vendored  Normal file
@@ -0,0 +1,26 @@
-----BEGIN CERTIFICATE-----
MIIEVzCCAj+gAwIBAgIRAKp18eYrjwoiCWbTi7/UuqEwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjQwMzEzMDAwMDAw
WhcNMjcwMzEyMjM1OTU5WjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDELMAkGA1UEAxMCRTcwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARB6AST
CFh/vjcwDMCgQer+VtqEkz7JANurZxLP+U9TCeioL6sp5Z8VRvRbYk4P1INBmbef
QHJFHCxcSjKmwtvGBWpl/9ra8HW0QDsUaJW2qOJqceJ0ZVFT3hbUHifBM/2jgfgw
gfUwDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD
ATASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSuSJ7chx1EoG/aouVgdAR4
wpwAgDAfBgNVHSMEGDAWgBR5tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcB
AQQmMCQwIgYIKwYBBQUHMAKGFmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0g
BAwwCjAIBgZngQwBAgEwJwYDVR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVu
Y3Iub3JnLzANBgkqhkiG9w0BAQsFAAOCAgEAjx66fDdLk5ywFn3CzA1w1qfylHUD
aEf0QZpXcJseddJGSfbUUOvbNR9N/QQ16K1lXl4VFyhmGXDT5Kdfcr0RvIIVrNxF
h4lqHtRRCP6RBRstqbZ2zURgqakn/Xip0iaQL0IdfHBZr396FgknniRYFckKORPG
yM3QKnd66gtMst8I5nkRQlAg/Jb+Gc3egIvuGKWboE1G89NTsN9LTDD3PLj0dUMr
OIuqVjLB8pEC6yk9enrlrqjXQgkLEYhXzq7dLafv5Vkig6Gl0nuuqjqfp0Q1bi1o
yVNAlXe6aUXw92CcghC9bNsKEO1+M52YY5+ofIXlS/SEQbvVYYBLZ5yeiglV6t3S
M6H+vTG0aP9YHzLn/KVOHzGQfXDP7qM5tkf+7diZe7o2fw6O7IvN6fsQXEQQj8TJ
UXJxv2/uJhcuy/tSDgXwHM8Uk34WNbRT7zGTGkQRX0gsbjAea/jYAoWv0ZvQRwpq
Pe79D/i7Cep8qWnA+7AE/3B3S/3dEEYmc0lpe1366A/6GEgk3ktr9PEoQrLChs6I
tu3wnNLB2euC8IKGLQFpGtOO/2/hiAKjyajaBP25w1jF0Wl8Bbqne3uZ2q1GyPFJ
YRmT7/OXpmOH/FVLtwS+8ng1cAmpCujPwteJZNcDG0sF2n/sc0+SQf49fdyUK0ty
+VUwFj9tmWxyR/M=
-----END CERTIFICATE-----
4  cmd/cert-bundler/testdata/pkg/bundle.sha256  vendored  Normal file
@@ -0,0 +1,4 @@
5ed8bf9ed693045faa8a5cb0edc4a870052e56aef6291ce8b1604565affbc2a4 core_certs.zip
e59eddc590d2f7b790a87c5b56e81697088ab54be382c0e2c51b82034006d308 core_certs.tgz
51b9b63b1335118079e90700a3a5b847c363808e9116e576ca84f301bc433289 google_certs.tgz
3d1910ca8835c3ded1755a8c7d6c48083c2f3ff68b2bfbf932aaf27e29d0a232 lets_encrypt.zip
BIN  cmd/cert-bundler/testdata/pkg/core_certs.tgz  vendored  Normal file
Binary file not shown.
BIN  cmd/cert-bundler/testdata/pkg/core_certs.zip  vendored  Normal file
Binary file not shown.
BIN  cmd/cert-bundler/testdata/pkg/google_certs.tgz  vendored  Normal file
Binary file not shown.
BIN  cmd/cert-bundler/testdata/pkg/lets_encrypt.zip  vendored  Normal file
Binary file not shown.
36  cmd/cert-revcheck/README.txt  Normal file
@@ -0,0 +1,36 @@
cert-revcheck: check certificate expiry and revocation
-----------------------------------------------------

Description
  cert-revcheck accepts a list of certificate files (PEM or DER) or
  site addresses (host[:port]) and checks whether the leaf certificate
  is expired or revoked. Revocation checks use CRL and OCSP via the
  certlib/revoke package.

Usage
  cert-revcheck [options] <target> [<target>...]

Options
  -hardfail      treat revocation check failures as fatal (default: false)
  -timeout dur   HTTP/OCSP/CRL timeout for network operations (default: 10s)
  -v             verbose output

Targets
  - File paths to certificates in PEM or DER format.
  - Site addresses in the form host or host:port. If no port is
    provided, 443 is assumed.

Examples
  # Check a PEM file
  cert-revcheck ./server.pem

  # Check a DER (single) certificate
  cert-revcheck ./server.der

  # Check a live site (leaf certificate)
  cert-revcheck example.com:443

Notes
  - For sites, only the leaf certificate is checked.
  - When -hardfail is set, network issues during OCSP/CRL fetch will
    cause the check to fail (treated as revoked).
140  cmd/cert-revcheck/main.go  Normal file
@@ -0,0 +1,140 @@
package main

import (
	"crypto/tls"
	"crypto/x509"
	"flag"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"time"

	"git.wntrmute.dev/kyle/goutils/certlib"
	hosts "git.wntrmute.dev/kyle/goutils/certlib/hosts"
	"git.wntrmute.dev/kyle/goutils/certlib/revoke"
	"git.wntrmute.dev/kyle/goutils/fileutil"
)

var (
	hardfail bool
	timeout  time.Duration
	verbose  bool
)

func main() {
	flag.BoolVar(&hardfail, "hardfail", false, "treat revocation check failures as fatal")
	flag.DurationVar(&timeout, "timeout", 10*time.Second, "network timeout for OCSP/CRL fetches and TLS site connects")
	flag.BoolVar(&verbose, "v", false, "verbose output")
	flag.Parse()

	revoke.HardFail = hardfail
	// Set HTTP client timeout for revocation library
	revoke.HTTPClient.Timeout = timeout

	if flag.NArg() == 0 {
		fmt.Fprintf(os.Stderr, "Usage: %s [options] <target> [<target>...]\n", os.Args[0])
		os.Exit(2)
	}

	exitCode := 0
	for _, target := range flag.Args() {
		status, err := processTarget(target)
		switch status {
		case "OK":
			fmt.Printf("%s: OK\n", target)
		case "EXPIRED":
			fmt.Printf("%s: EXPIRED: %v\n", target, err)
			exitCode = 1
		case "REVOKED":
			fmt.Printf("%s: REVOKED\n", target)
			exitCode = 1
		case "UNKNOWN":
			fmt.Printf("%s: UNKNOWN: %v\n", target, err)
			if hardfail {
				// In hardfail, treat unknown as failure
				exitCode = 1
			}
		}
	}

	os.Exit(exitCode)
}

func processTarget(target string) (string, error) {
	if fileutil.FileDoesExist(target) {
		return checkFile(target)
	}

	// Not a file; treat as site
	return checkSite(target)
}

func checkFile(path string) (string, error) {
	in, err := ioutil.ReadFile(path)
	if err != nil {
		return "UNKNOWN", err
	}

	// Try PEM first; if that fails, try single DER cert
	certs, err := certlib.ReadCertificates(in)
	if err != nil || len(certs) == 0 {
		cert, _, derr := certlib.ReadCertificate(in)
		if derr != nil || cert == nil {
			if err == nil {
				err = derr
			}
			return "UNKNOWN", err
		}
		return evaluateCert(cert)
	}

	// Evaluate the first certificate (leaf) by default
	return evaluateCert(certs[0])
}

func checkSite(hostport string) (string, error) {
	// Use certlib/hosts to parse host/port (supports https URLs and host:port)
	target, err := hosts.ParseHost(hostport)
	if err != nil {
		return "UNKNOWN", err
	}

	d := &net.Dialer{Timeout: timeout}
	conn, err := tls.DialWithDialer(d, "tcp", target.String(), &tls.Config{InsecureSkipVerify: true, ServerName: target.Host})
	if err != nil {
		return "UNKNOWN", err
	}
	defer conn.Close()

	state := conn.ConnectionState()
	if len(state.PeerCertificates) == 0 {
		return "UNKNOWN", errors.New("no peer certificates presented")
	}
	return evaluateCert(state.PeerCertificates[0])
}

func evaluateCert(cert *x509.Certificate) (string, error) {
	// Expiry check
	now := time.Now()
	if !now.Before(cert.NotAfter) {
		return "EXPIRED", fmt.Errorf("expired at %s", cert.NotAfter)
	}
	if !now.After(cert.NotBefore) {
		return "EXPIRED", fmt.Errorf("not valid until %s", cert.NotBefore)
	}

	// Revocation check using certlib/revoke
	revoked, ok, err := revoke.VerifyCertificateError(cert)
	if revoked {
		// If revoked is true, ok will be true per implementation, err may describe why
		return "REVOKED", err
	}
	if !ok {
		// Revocation status could not be determined
		return "UNKNOWN", err
	}

	return "OK", nil
}
@@ -110,6 +110,14 @@ func showBasicConstraints(cert *x509.Certificate) {
 	if cert.IsCA {
 		fmt.Printf(", is a CA certificate")
+		if !cert.BasicConstraintsValid {
+			fmt.Printf(" (basic constraint failure)")
+		}
+	} else {
+		fmt.Printf("is not a CA certificate")
+		if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
+			fmt.Printf(" (key encipherment usage enabled!)")
+		}
 	}
 
 	if (cert.MaxPathLen == 0 && cert.MaxPathLenZero) || (cert.MaxPathLen > 0) {
@@ -35,7 +35,7 @@ func main() {
 	for _, arg := range flag.Args() {
 		if err := lookupHost(arg); err != nil {
-			log.Println("%s: %s", arg, err)
+			log.Printf("%s: %s", arg, err)
 		}
 	}
 }
@@ -9,8 +9,6 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
-
-	"github.com/pkg/errors"
 )
 
 const gzipExt = ".gz"
@@ -18,29 +16,25 @@ const gzipExt = ".gz"
 func compress(path, target string, level int) error {
 	sourceFile, err := os.Open(path)
 	if err != nil {
-		return errors.Wrap(err, "opening file for read")
+		return fmt.Errorf("opening file for read: %w", err)
 	}
 	defer sourceFile.Close()
 
 	destFile, err := os.Create(target)
 	if err != nil {
-		return errors.Wrap(err, "opening file for write")
+		return fmt.Errorf("opening file for write: %w", err)
 	}
 	defer destFile.Close()
 
 	gzipCompressor, err := gzip.NewWriterLevel(destFile, level)
 	if err != nil {
-		return errors.Wrap(err, "invalid compression level")
+		return fmt.Errorf("invalid compression level: %w", err)
 	}
 	defer gzipCompressor.Close()
 
 	_, err = io.Copy(gzipCompressor, sourceFile)
 	if err != nil {
-		return errors.Wrap(err, "compressing file")
+		return fmt.Errorf("compressing file: %w", err)
 	}
-
-	if err != nil {
-		return errors.Wrap(err, "stat(2)ing destination file")
-	}
 
 	return nil
@@ -49,25 +43,25 @@ func compress(path, target string, level int) error {
 func uncompress(path, target string) error {
 	sourceFile, err := os.Open(path)
 	if err != nil {
-		return errors.Wrap(err, "opening file for read")
+		return fmt.Errorf("opening file for read: %w", err)
 	}
 	defer sourceFile.Close()
 
 	gzipUncompressor, err := gzip.NewReader(sourceFile)
 	if err != nil {
-		return errors.Wrap(err, "reading gzip headers")
+		return fmt.Errorf("reading gzip headers: %w", err)
 	}
 	defer gzipUncompressor.Close()
 
 	destFile, err := os.Create(target)
 	if err != nil {
-		return errors.Wrap(err, "opening file for write")
+		return fmt.Errorf("opening file for write: %w", err)
 	}
 	defer destFile.Close()
 
 	_, err = io.Copy(destFile, gzipUncompressor)
 	if err != nil {
-		return errors.Wrap(err, "uncompressing file")
+		return fmt.Errorf("uncompressing file: %w", err)
 	}
 
 	return nil
@@ -113,7 +107,7 @@ func pathForUncompressing(source, dest string) (string, error) {
 	source = filepath.Base(source)
 	if !strings.HasSuffix(source, gzipExt) {
-		return "", errors.Errorf("%s is a not gzip-compressed file", source)
+		return "", fmt.Errorf("%s is a not gzip-compressed file", source)
 	}
 	outFile := source[:len(source)-len(gzipExt)]
 	outFile = filepath.Join(dest, outFile)
@@ -127,7 +121,7 @@ func pathForCompressing(source, dest string) (string, error) {
 	source = filepath.Base(source)
 	if strings.HasSuffix(source, gzipExt) {
-		return "", errors.Errorf("%s is a gzip-compressed file", source)
+		return "", fmt.Errorf("%s is a gzip-compressed file", source)
 	}
 
 	dest = filepath.Join(dest, source+gzipExt)
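The errors.Wrap → fmt.Errorf("…: %w", err) migration above keeps the underlying cause inspectable without the external dependency. A small standard-library-only check of that property (editor's illustration, not part of the repository):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("does-not-exist.gz")
	wrapped := fmt.Errorf("opening file for read: %w", err)

	// %w preserves the error chain, so callers can still match the cause.
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // prints: true
}
```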
3  cmd/minmax/README  Normal file
@@ -0,0 +1,3 @@
minmax

A quick tool to calculate minmax codes if needed for uLisp.
53  cmd/minmax/minmax.go  Normal file
@@ -0,0 +1,53 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"strconv"
)

var kinds = map[string]int{
	"sym": 0,
	"tf":  1,
	"fn":  2,
	"sp":  3,
}

func dieIf(err error) {
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "[!] %s\n", err)
	os.Exit(1)
}

func usage() {
	fmt.Fprintf(os.Stderr, "usage: minmax type min max\n")
	fmt.Fprintf(os.Stderr, "	type is one of fn, sp, sym, tf\n")
	os.Exit(1)
}

func main() {
	flag.Parse()

	if flag.NArg() != 3 {
		usage()
	}

	kind, ok := kinds[flag.Arg(0)]
	if !ok {
		usage()
	}

	min, err := strconv.Atoi(flag.Arg(1))
	dieIf(err)

	max, err := strconv.Atoi(flag.Arg(2))
	dieIf(err)

	code := kind << 6
	code += (min << 3)
	code += max
	fmt.Printf("%0o\n", code)
}
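For reference, the encoding above packs the kind into bits 6–7, min into bits 3–5, and max into bits 0–2. A worked example with a hypothetical invocation: `minmax fn 2 3` computes (2<<6) + (2<<3) + 3 = 128 + 16 + 3 = 147, which the `%0o` format prints in octal as 223.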
48  cmd/rolldie/main.go  Normal file
@@ -0,0 +1,48 @@
package main

import (
	"flag"
	"fmt"
	"math/rand"
	"os"
	"regexp"
	"strconv"

	"git.wntrmute.dev/kyle/goutils/die"
)

var dieRollFormat = regexp.MustCompile(`^(\d+)[dD](\d+)$`)

func rollDie(count, sides int) []int {
	sum := 0
	var rolls []int

	for i := 0; i < count; i++ {
		roll := rand.Intn(sides) + 1
		sum += roll
		rolls = append(rolls, roll)
	}

	rolls = append(rolls, sum)
	return rolls
}

func main() {
	flag.Parse()

	for _, arg := range flag.Args() {
		if !dieRollFormat.MatchString(arg) {
			fmt.Fprintf(os.Stderr, "invalid die format %s: should be XdY\n", arg)
			os.Exit(1)
		}

		dieRoll := dieRollFormat.FindAllStringSubmatch(arg, -1)
		count, err := strconv.Atoi(dieRoll[0][1])
		die.If(err)

		sides, err := strconv.Atoi(dieRoll[0][2])
		die.If(err)

		fmt.Println(rollDie(count, sides))
	}
}
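Since rollDie appends the total after the individual rolls, a hypothetical run of `rolldie 2d6` prints something like `[4 2 6]`: two rolls followed by their sum. Note that the global math/rand source is used unseeded here, so on older Go versions the sequence repeats between runs unless seeded explicitly.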
34  cmd/tlsinfo/README.txt  Normal file
@@ -0,0 +1,34 @@
tlsinfo: show TLS version, cipher, and peer certificates
---------------------------------------------------------

Description
  tlsinfo connects to a TLS server and prints the negotiated TLS version and
  cipher suite, followed by details for each certificate in the server’s
  presented chain (as provided by the server).

Usage
  tlsinfo <hostname:port>

Output
  The program prints the negotiated protocol and cipher, then one section per
  certificate in the order received from the server. Example fields:
    TLS Version: TLS 1.3
    Cipher Suite: TLS_AES_128_GCM_SHA256
    Certificate 1
      Subject: CN=example.com, O=Example Corp, C=US
      Issuer: CN=Example Root CA, O=Example Corp, C=US
      DNS Names: [example.com www.example.com]
      Not Before: 2025-01-01 00:00:00 +0000 UTC
      Not After: 2026-01-01 23:59:59 +0000 UTC

Examples
  # Inspect a public HTTPS endpoint
  tlsinfo example.com:443

Notes
  - Verification is intentionally disabled (InsecureSkipVerify=true). The tool
    does not validate the server certificate or hostname; it is for inspection
    only.
  - The SNI/ServerName is inferred from <hostname> when applicable.
  - You must specify a port (e.g., 443 for HTTPS).
  - The entire certificate chain is printed exactly as presented by the server.
64  cmd/tlsinfo/main.go  Normal file
@@ -0,0 +1,64 @@
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Printf("Usage: %s <hostname:port>\n", os.Args[0])
		os.Exit(1)
	}

	hostPort := os.Args[1]
	conn, err := tls.Dial("tcp", hostPort, &tls.Config{
		InsecureSkipVerify: true,
	})

	if err != nil {
		fmt.Printf("Failed to connect to the TLS server: %v\n", err)
		os.Exit(1)
	}
	defer conn.Close()
	state := conn.ConnectionState()
	printConnectionDetails(state)
}

func printConnectionDetails(state tls.ConnectionState) {
	version := tlsVersion(state.Version)
	cipherSuite := tls.CipherSuiteName(state.CipherSuite)
	fmt.Printf("TLS Version: %s\n", version)
	fmt.Printf("Cipher Suite: %s\n", cipherSuite)
	printPeerCertificates(state.PeerCertificates)
}

func tlsVersion(version uint16) string {
	switch version {

	case tls.VersionTLS13:
		return "TLS 1.3"
	case tls.VersionTLS12:

		return "TLS 1.2"
	case tls.VersionTLS11:
		return "TLS 1.1"
	case tls.VersionTLS10:
		return "TLS 1.0"
	default:
		return "Unknown"
	}
}

func printPeerCertificates(certificates []*x509.Certificate) {
	for i, cert := range certificates {
		fmt.Printf("Certificate %d\n", i+1)
		fmt.Printf("\tSubject: %s\n", cert.Subject)
		fmt.Printf("\tIssuer: %s\n", cert.Issuer)
		fmt.Printf("\tDNS Names: %v\n", cert.DNSNames)
		fmt.Printf("\tNot Before: %s\n", cert.NotBefore)
		fmt.Printf("\tNot After: %s\n", cert.NotAfter)
	}
}
@@ -68,7 +68,7 @@ func showFile(path string) {
 func searchFile(path string, search *regexp.Regexp) error {
 	file, err := os.Open(path)
 	if err != nil {
-		errorf("%v")
+		errorf("%v", err)
 		return err
 	}
 	defer file.Close()
@@ -11,7 +11,7 @@ package config
 import (
 	"bufio"
 	"fmt"
-	"log"
+	"maps"
 	"os"
 	"sort"
 	"strings"
@@ -33,14 +33,15 @@ func SetEnvPrefix(pfx string) {
 	prefix = pfx
 }
 
+const keyValueSplitLength = 2
+
 func addLine(line string) {
 	if strings.HasPrefix(line, "#") || line == "" {
 		return
 	}
 
-	lineParts := strings.SplitN(line, "=", 2)
-	if len(lineParts) != 2 {
-		log.Print("skipping line: ", line)
+	lineParts := strings.SplitN(line, "=", keyValueSplitLength)
+	if len(lineParts) != keyValueSplitLength {
 		return // silently ignore empty keys
 	}
 
@@ -49,7 +50,7 @@ func addLine(line string) {
 	vars[lineParts[0]] = lineParts[1]
 }
 
-// LoadFile scans the file at path for key=value pairs and adds them
+// LoadFile scans the file at 'path' for key=value pairs and adds them
 // to the configuration.
 func LoadFile(path string) error {
 	file, err := os.Open(path)
@@ -64,25 +65,19 @@ func LoadFile(path string) error {
 		addLine(line)
 	}
 
-	if err = scanner.Err(); err != nil {
-		return err
-	}
-
-	return nil
+	return scanner.Err()
 }
 
-// LoadFileFor scans the ini file at path, loading the default section
-// and overriding any keys found under section. If strict is true, the
-// named section must exist (i.e. to catch typos in the section name).
+// LoadFileFor scans the ini file at 'path', loading the default section
+// and overriding any keys found under 'section'. If strict is true, the
+// named section must exist (i.e., to catch typos in the section name).
 func LoadFileFor(path, section string, strict bool) error {
 	cmap, err := iniconf.ParseFile(path)
 	if err != nil {
 		return err
 	}
 
-	for key, value := range cmap[iniconf.DefaultSection] {
-		vars[key] = value
-	}
+	maps.Copy(vars, cmap[iniconf.DefaultSection])
 
 	smap, ok := cmap[section]
 	if !ok {
@@ -92,9 +87,7 @@ func LoadFileFor(path, section string, strict bool) error {
 		return nil
 	}
 
-	for key, value := range smap {
-		vars[key] = value
-	}
+	maps.Copy(vars, smap)
 
 	return nil
 }
@@ -111,7 +104,7 @@ func Get(key string) string {
 
 // GetDefault retrieves a value from either a configuration file or
 // the environment. Note that value from a file will override
-// environment variables. If a value isn't found (e.g. Get returns an
+// environment variables. If a value isn't found (e.g., Get returns an
 // empty string), the default value will be used.
 func GetDefault(key, def string) string {
 	if v := Get(key); v != "" {
@@ -121,8 +114,7 @@ func GetDefault(key, def string) string {
 }
 
 // Require retrieves a value from either a configuration file or the
-// environment. If the key isn't present, it will call log.Fatal, printing
-// the missing key.
+// environment. If the key isn't present, it will panic.
 func Require(key string) string {
 	if v, ok := vars[key]; ok {
 		return v
@@ -135,7 +127,7 @@ func Require(key string) string {
 		envMessage = " (note: looked for the key " + prefix + key
 		envMessage += " in the local env)"
 	}
-	log.Fatalf("missing required configuration value %s%s", key, envMessage)
+	panic(fmt.Sprintf("missing required configuration value %s%s", key, envMessage))
 }
 
 	return v
@@ -143,7 +135,8 @@ func Require(key string) string {
 
 // ListKeys returns a slice of the currently known keys.
 func ListKeys() []string {
-	keyList := []string{}
+	var keyList []string
+
 	for k := range vars {
 		keyList = append(keyList, k)
 	}
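Taken together, the config changes above keep the same loading model (environment first, with values from files overriding). A minimal usage sketch against the exported API visible in this diff; the file path, section name, and keys are illustrative only:

```go
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/config"
)

func main() {
	config.SetEnvPrefix("APP_")

	// Load the default section, then override with the "production" section;
	// strict=true makes a missing section an error.
	if err := config.LoadFileFor("app.ini", "production", true); err != nil {
		fmt.Println("config load failed:", err)
		return
	}

	addr := config.GetDefault("ADDR", ":8080")
	dsn := config.Require("DATABASE_URL") // panics if missing, per the change above
	fmt.Println(addr, dsn)
}
```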
@@ -1,27 +1,26 @@
-package config
+package config_test
 
 import (
 	"os"
 	"testing"
+
+	"git.wntrmute.dev/kyle/goutils/config"
 )
 
 const (
 	testFilePath = "testdata/test.env"
 
-	// Keys
+	// Key constants.
 	kOrder   = "ORDER"
 	kSpecies = "SPECIES"
 	kName    = "COMMON_NAME"
 
-	// Env
 	eOrder   = "corvus"
 	eSpecies = "corvus corax"
 	eName    = "northern raven"
 
-	// File
 	fOrder   = "stringiformes"
 	fSpecies = "strix aluco"
-	// Name isn't set in the file to test fall through.
 )
 
 func init() {
@@ -31,8 +30,8 @@ func init() {
 }
 
 func TestLoadEnvOnly(t *testing.T) {
-	order := Get(kOrder)
-	species := Get(kSpecies)
+	order := config.Get(kOrder)
+	species := config.Get(kSpecies)
 	if order != eOrder {
 		t.Errorf("want %s, have %s", eOrder, order)
 	}
@@ -43,14 +42,14 @@ func TestLoadEnvOnly(t *testing.T) {
 }
 
 func TestLoadFile(t *testing.T) {
-	err := LoadFile(testFilePath)
+	err := config.LoadFile(testFilePath)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	order := Get(kOrder)
-	species := Get(kSpecies)
-	name := Get(kName)
+	order := config.Get(kOrder)
+	species := config.Get(kSpecies)
+	name := config.Get(kName)
 
 	if order != fOrder {
 		t.Errorf("want %s, have %s", fOrder, order)
@@ -2,6 +2,7 @@ package iniconf
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
@@ -23,86 +24,115 @@ var (
|
|||||||
var DefaultSection = "default"
|
var DefaultSection = "default"
|
||||||
|
|
||||||
// ParseFile attempts to load the named config file.
|
// ParseFile attempts to load the named config file.
|
||||||
func ParseFile(fileName string) (cfg ConfigMap, err error) {
|
func ParseFile(fileName string) (ConfigMap, error) {
|
||||||
var file *os.File
|
file, err := os.Open(fileName)
|
||||||
file, err = os.Open(fileName)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, err
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
return ParseReader(file)
|
return ParseReader(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseReader reads a configuration from an io.Reader.
|
// ParseReader reads a configuration from an io.Reader.
|
||||||
func ParseReader(r io.Reader) (cfg ConfigMap, err error) {
|
func ParseReader(r io.Reader) (ConfigMap, error) {
|
||||||
cfg = ConfigMap{}
|
cfg := ConfigMap{}
|
||||||
buf := bufio.NewReader(r)
|
buf := bufio.NewReader(r)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
line string
|
line string
|
||||||
longLine bool
|
longLine bool
|
||||||
currentSection string
|
currentSection string
|
||||||
lineBytes []byte
|
err error
|
||||||
isPrefix bool
|
|
||||||
)
|
)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
err = nil
|
line, longLine, err = readConfigLine(buf, line, longLine)
|
||||||
lineBytes, isPrefix, err = buf.ReadLine()
|
if errors.Is(err, io.EOF) {
|
||||||
if io.EOF == err {
|
|
||||||
err = nil
|
err = nil
|
||||||
break
|
break
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
break
|
break
|
||||||
} else if isPrefix {
|
|
||||||
line += string(lineBytes)
|
|
||||||
|
|
||||||
longLine = true
|
|
||||||
continue
|
|
||||||
} else if longLine {
|
|
||||||
line += string(lineBytes)
|
|
||||||
longLine = false
|
|
||||||
} else {
|
|
||||||
line = string(lineBytes)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if commentLine.MatchString(line) {
|
if line == "" {
|
||||||
continue
|
continue
|
||||||
} else if blankLine.MatchString(line) {
|
}
|
||||||
continue
|
|
||||||
} else if configSection.MatchString(line) {
|
currentSection, err = processConfigLine(cfg, line, currentSection)
|
||||||
section := configSection.ReplaceAllString(line,
|
if err != nil {
|
||||||
"$1")
|
|
||||||
if section == "" {
|
|
||||||
err = fmt.Errorf("invalid structure in file")
|
|
||||||
break
|
break
|
||||||
} else if !cfg.SectionInConfig(section) {
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cfg, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// readConfigLine reads and assembles a complete configuration line, handling long lines.
|
||||||
|
func readConfigLine(buf *bufio.Reader, currentLine string, longLine bool) (string, bool, error) {
|
||||||
|
lineBytes, isPrefix, err := buf.ReadLine()
|
||||||
|
if err != nil {
|
||||||
|
return "", false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if isPrefix {
|
||||||
|
return currentLine + string(lineBytes), true, nil
|
||||||
|
} else if longLine {
|
||||||
|
return currentLine + string(lineBytes), false, nil
|
||||||
|
}
|
||||||
|
return string(lineBytes), false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// processConfigLine processes a single line and updates the configuration map.
|
||||||
|
func processConfigLine(cfg ConfigMap, line string, currentSection string) (string, error) {
|
||||||
|
if commentLine.MatchString(line) || blankLine.MatchString(line) {
|
||||||
|
return currentSection, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if configSection.MatchString(line) {
|
||||||
|
return handleSectionLine(cfg, line)
|
||||||
|
}
|
||||||
|
|
||||||
|
if configLine.MatchString(line) {
|
||||||
|
return handleConfigLine(cfg, line, currentSection)
|
||||||
|
}
|
||||||
|
|
||||||
|
return currentSection, errors.New("invalid config file")
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleSectionLine processes a section header line.
|
||||||
|
func handleSectionLine(cfg ConfigMap, line string) (string, error) {
|
||||||
|
section := configSection.ReplaceAllString(line, "$1")
|
||||||
|
if section == "" {
|
||||||
|
return "", errors.New("invalid structure in file")
|
||||||
|
}
|
||||||
|
if !cfg.SectionInConfig(section) {
|
||||||
cfg[section] = make(map[string]string, 0)
|
cfg[section] = make(map[string]string, 0)
|
||||||
}
|
}
|
||||||
currentSection = section
|
return section, nil
|
||||||
} else if configLine.MatchString(line) {
|
}
|
||||||
|
|
||||||
|
// handleConfigLine processes a key=value configuration line.
|
||||||
|
func handleConfigLine(cfg ConfigMap, line string, currentSection string) (string, error) {
|
||||||
regex := configLine
|
regex := configLine
|
||||||
if quotedConfigLine.MatchString(line) {
|
if quotedConfigLine.MatchString(line) {
|
||||||
regex = quotedConfigLine
|
regex = quotedConfigLine
|
||||||
}
|
}
|
||||||
|
|
||||||
if currentSection == "" {
|
if currentSection == "" {
|
||||||
currentSection = DefaultSection
|
currentSection = DefaultSection
|
||||||
if !cfg.SectionInConfig(currentSection) {
|
if !cfg.SectionInConfig(currentSection) {
|
||||||
cfg[currentSection] = map[string]string{}
|
cfg[currentSection] = map[string]string{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
key := regex.ReplaceAllString(line, "$1")
|
key := regex.ReplaceAllString(line, "$1")
|
||||||
val := regex.ReplaceAllString(line, "$2")
|
val := regex.ReplaceAllString(line, "$2")
|
||||||
if key == "" {
|
if key != "" {
|
||||||
continue
|
|
||||||
}
|
|
||||||
cfg[currentSection][key] = val
|
cfg[currentSection][key] = val
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("invalid config file")
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return
|
return currentSection, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
 // SectionInConfig determines whether a section is in the configuration.
@@ -112,41 +142,39 @@ func (c ConfigMap) SectionInConfig(section string) bool {
 }

 // ListSections returns the list of sections in the config map.
-func (c ConfigMap) ListSections() (sections []string) {
+func (c ConfigMap) ListSections() []string {
+	sections := make([]string, 0, len(c))
 	for section := range c {
 		sections = append(sections, section)
 	}
-	return
+	return sections
 }

 // WriteFile writes out the configuration to a file.
-func (c ConfigMap) WriteFile(filename string) (err error) {
+func (c ConfigMap) WriteFile(filename string) error {
 	file, err := os.Create(filename)
 	if err != nil {
-		return
+		return err
 	}
 	defer file.Close()

 	for _, section := range c.ListSections() {
 		sName := fmt.Sprintf("[ %s ]\n", section)
-		_, err = file.Write([]byte(sName))
-		if err != nil {
-			return
+		if _, err = file.WriteString(sName); err != nil {
+			return err
 		}

 		for k, v := range c[section] {
 			line := fmt.Sprintf("%s = %s\n", k, v)
-			_, err = file.Write([]byte(line))
-			if err != nil {
-				return
+			if _, err = file.WriteString(line); err != nil {
+				return err
 			}
 		}
-		_, err = file.Write([]byte{0x0a})
-		if err != nil {
-			return
+		if _, err = file.Write([]byte{0x0a}); err != nil {
+			return err
 		}
 	}
-	return
+	return nil
 }

 // AddSection creates a new section in the config map.
@@ -170,27 +198,26 @@ func (c ConfigMap) AddKeyVal(section, key, val string) {
 }

 // GetValue retrieves the value from a key map.
-func (c ConfigMap) GetValue(section, key string) (val string, present bool) {
+func (c ConfigMap) GetValue(section, key string) (string, bool) {
 	if c == nil {
-		return
+		return "", false
 	}

 	if section == "" {
 		section = DefaultSection
 	}

-	_, ok := c[section]
-	if !ok {
-		return
+	if _, ok := c[section]; !ok {
+		return "", false
 	}

-	val, present = c[section][key]
-	return
+	val, present := c[section][key]
+	return val, present
 }

 // GetValueDefault retrieves the value from a key map if present,
 // otherwise the default value.
-func (c ConfigMap) GetValueDefault(section, key, value string) (val string) {
+func (c ConfigMap) GetValueDefault(section, key, value string) string {
 	kval, ok := c.GetValue(section, key)
 	if !ok {
 		return value
@@ -199,7 +226,7 @@ func (c ConfigMap) GetValueDefault(section, key, value string) (val string) {
 }

 // SectionKeys returns the sections in the config map.
-func (c ConfigMap) SectionKeys(section string) (keys []string, present bool) {
+func (c ConfigMap) SectionKeys(section string) ([]string, bool) {
 	if c == nil {
 		return nil, false
 	}
@@ -208,13 +235,12 @@ func (c ConfigMap) SectionKeys(section string) (keys []string, present bool) {
 		section = DefaultSection
 	}

-	cm := c
-	s, ok := cm[section]
+	s, ok := c[section]
 	if !ok {
 		return nil, false
 	}

-	keys = make([]string, 0, len(s))
+	keys := make([]string, 0, len(s))
 	for key := range s {
 		keys = append(keys, key)
 	}
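
Note: for orientation, the iniconf functions touched above are used roughly as follows. This is a minimal sketch, not code from the repository; the "server", "port" and "host" names are purely illustrative.

    package main

    import (
        "fmt"
        "log"

        "git.wntrmute.dev/kyle/goutils/config/iniconf"
    )

    func main() {
        cmap, err := iniconf.ParseFile("testdata/test.conf")
        if err != nil {
            log.Fatal(err)
        }

        // GetValue reports whether the key was present; GetValueDefault falls back.
        if port, ok := cmap.GetValue("server", "port"); ok {
            fmt.Println("port =", port)
        }
        fmt.Println("host =", cmap.GetValueDefault("server", "host", "localhost"))

        // WriteFile round-trips the map back to disk.
        if err := cmap.WriteFile("testdata/test.out"); err != nil {
            log.Fatal(err)
        }
    }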
@@ -1,18 +1,19 @@
-package iniconf
+package iniconf_test

 import (
 	"errors"
-	"fmt"
 	"os"
 	"sort"
 	"testing"
+
+	"git.wntrmute.dev/kyle/goutils/config/iniconf"
 )

 // FailWithError is a utility for dumping errors and failing the test.
 func FailWithError(t *testing.T, err error) {
-	fmt.Println("failed")
+	t.Log("failed")
 	if err != nil {
-		fmt.Println("[!] ", err.Error())
+		t.Log("[!] ", err.Error())
 	}
 	t.FailNow()
 }
@@ -49,47 +50,50 @@ func stringSlicesEqual(slice1, slice2 []string) bool {

 func TestGoodConfig(t *testing.T) {
 	testFile := "testdata/test.conf"
-	fmt.Printf("[+] validating known-good config... ")
+	t.Logf("[+] validating known-good config... ")
-	cmap, err := ParseFile(testFile)
+	cmap, err := iniconf.ParseFile(testFile)
 	if err != nil {
 		FailWithError(t, err)
 	} else if len(cmap) != 2 {
 		FailWithError(t, err)
 	}
-	fmt.Println("ok")
+	t.Log("ok")
 }

 func TestGoodConfig2(t *testing.T) {
 	testFile := "testdata/test2.conf"
-	fmt.Printf("[+] validating second known-good config... ")
+	t.Logf("[+] validating second known-good config... ")
-	cmap, err := ParseFile(testFile)
+	cmap, err := iniconf.ParseFile(testFile)
-	if err != nil {
+	switch {
+	case err != nil:
 		FailWithError(t, err)
-	} else if len(cmap) != 1 {
+	case len(cmap) != 1:
 		FailWithError(t, err)
-	} else if len(cmap["default"]) != 3 {
+	case len(cmap["default"]) != 3:
 		FailWithError(t, err)
+	default:
+		// nothing to do here
 	}
-	fmt.Println("ok")
+	t.Log("ok")
 }

 func TestBadConfig(t *testing.T) {
 	testFile := "testdata/bad.conf"
-	fmt.Printf("[+] ensure invalid config file fails... ")
+	t.Logf("[+] ensure invalid config file fails... ")
-	_, err := ParseFile(testFile)
+	_, err := iniconf.ParseFile(testFile)
 	if err == nil {
-		err = fmt.Errorf("invalid config file should fail")
+		err = errors.New("invalid config file should fail")
 		FailWithError(t, err)
 	}
-	fmt.Println("ok")
+	t.Log("ok")
 }

 func TestWriteConfigFile(t *testing.T) {
-	fmt.Printf("[+] ensure config file is written properly... ")
+	t.Logf("[+] ensure config file is written properly... ")
 	const testFile = "testdata/test.conf"
 	const testOut = "testdata/test.out"

-	cmap, err := ParseFile(testFile)
+	cmap, err := iniconf.ParseFile(testFile)
 	if err != nil {
 		FailWithError(t, err)
 	}
@@ -100,7 +104,7 @@ func TestWriteConfigFile(t *testing.T) {
 		FailWithError(t, err)
 	}

-	cmap2, err := ParseFile(testOut)
+	cmap2, err := iniconf.ParseFile(testOut)
 	if err != nil {
 		FailWithError(t, err)
 	}
@@ -110,25 +114,25 @@ func TestWriteConfigFile(t *testing.T) {
 	sort.Strings(sectionList1)
 	sort.Strings(sectionList2)
 	if !stringSlicesEqual(sectionList1, sectionList2) {
-		err = fmt.Errorf("section lists don't match")
+		err = errors.New("section lists don't match")
 		FailWithError(t, err)
 	}

 	for _, section := range sectionList1 {
 		for _, k := range cmap[section] {
 			if cmap[section][k] != cmap2[section][k] {
-				err = fmt.Errorf("config key doesn't match")
+				err = errors.New("config key doesn't match")
 				FailWithError(t, err)
 			}
 		}
 	}
-	fmt.Println("ok")
+	t.Log("ok")
 }

 func TestQuotedValue(t *testing.T) {
 	testFile := "testdata/test.conf"
-	fmt.Printf("[+] validating quoted value... ")
+	t.Logf("[+] validating quoted value... ")
-	cmap, _ := ParseFile(testFile)
+	cmap, _ := iniconf.ParseFile(testFile)
 	val := cmap["sectionName"]["key4"]
 	if val != " space at beginning and end " {
 		FailWithError(t, errors.New("Wrong value in double quotes ["+val+"]"))
@@ -138,5 +142,5 @@ func TestQuotedValue(t *testing.T) {
 	if val != " is quoted with single quotes " {
 		FailWithError(t, errors.New("Wrong value in single quotes ["+val+"]"))
 	}
-	fmt.Println("ok")
+	t.Log("ok")
 }
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux

 package config

@@ -1,7 +1,11 @@
-package config
+package config_test

-import "testing"
+import (
+	"testing"
+
+	"git.wntrmute.dev/kyle/goutils/config"
+)

 func TestDefaultPath(t *testing.T) {
-	t.Log(DefaultConfigPath("demoapp", "app.conf"))
+	t.Log(config.DefaultConfigPath("demoapp", "app.conf"))
 }
@@ -47,7 +47,7 @@ func ToFile(path string) (*DebugPrinter, error) {
 	}, nil
 }

-// To sets up a new DebugPrint to an io.WriteCloser.
+// To will set up a new DebugPrint to an io.WriteCloser.
 func To(w io.WriteCloser) *DebugPrinter {
 	return &DebugPrinter{
 		out: w,
@@ -55,21 +55,21 @@ func To(w io.WriteCloser) *DebugPrinter {
 }

 // Print calls fmt.Print if Enabled is true.
-func (dbg *DebugPrinter) Print(v ...interface{}) {
+func (dbg *DebugPrinter) Print(v ...any) {
 	if dbg.Enabled {
 		fmt.Fprint(dbg.out, v...)
 	}
 }

 // Println calls fmt.Println if Enabled is true.
-func (dbg *DebugPrinter) Println(v ...interface{}) {
+func (dbg *DebugPrinter) Println(v ...any) {
 	if dbg.Enabled {
 		fmt.Fprintln(dbg.out, v...)
 	}
 }

 // Printf calls fmt.Printf if Enabled is true.
-func (dbg *DebugPrinter) Printf(format string, v ...interface{}) {
+func (dbg *DebugPrinter) Printf(format string, v ...any) {
 	if dbg.Enabled {
 		fmt.Fprintf(dbg.out, format, v...)
 	}
@@ -2,7 +2,6 @@ package dbg

 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"testing"

@@ -50,7 +49,7 @@ func TestTo(t *testing.T) {
 }

 func TestToFile(t *testing.T) {
-	testFile, err := ioutil.TempFile("", "dbg")
+	testFile, err := os.CreateTemp(t.TempDir(), "dbg")
 	assert.NoErrorT(t, err)
 	err = testFile.Close()
 	assert.NoErrorT(t, err)
@@ -103,7 +102,7 @@ func TestWriting(t *testing.T) {
 }

 func TestToFileError(t *testing.T) {
-	testFile, err := ioutil.TempFile("", "dbg")
+	testFile, err := os.CreateTemp(t.TempDir(), "dbg")
 	assert.NoErrorT(t, err)
 	err = testFile.Chmod(0400)
 	assert.NoErrorT(t, err)
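
Note: os.CreateTemp combined with t.TempDir() gives a per-test temporary file that the testing framework removes automatically, which is why no cleanup call is needed above. A minimal sketch of the pattern (the test name is illustrative):

    package dbg_test

    import (
        "os"
        "testing"
    )

    func TestTempFilePattern(t *testing.T) {
        // t.TempDir() is created for this test and deleted when it finishes,
        // so the temporary file needs no explicit removal.
        f, err := os.CreateTemp(t.TempDir(), "dbg")
        if err != nil {
            t.Fatal(err)
        }
        defer f.Close()

        if _, err := f.WriteString("hello"); err != nil {
            t.Fatal(err)
        }
    }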
@@ -1,12 +0,0 @@
-Simple fatal utilities for Go programs.
-
-```
-result, err := doSomething()
-die.If(err)
-
-ok := processResult(result)
-if !ok {
-	die.With("failed to process result %s", result.Name)
-}
-```
-
@@ -1,4 +1,5 @@
-// Package die contains utilities for fatal error handling.
+// Package die contains utilities for fatal error handling. It
+// presents simple fatal utilities for Go programs.
 package die

 import (
@@ -15,14 +16,14 @@ func If(err error) {
 }

 // With prints the message to stderr, appending a newline, and exits.
-func With(fstr string, args ...interface{}) {
+func With(fstr string, args ...any) {
 	out := fmt.Sprintf("[!] %s\n", fstr)
 	fmt.Fprintf(os.Stderr, out, args...)
 	os.Exit(1)
 }

 // When prints the error to stderr and exits if cond is true.
-func When(cond bool, fstr string, args ...interface{}) {
+func When(cond bool, fstr string, args ...any) {
 	if cond {
 		With(fstr, args...)
 	}
@@ -1,10 +1,10 @@
 //go:build !windows
-// +build !windows

 // Package fileutil contains common file functions.
 package fileutil

 import (
+	"math"
 	"os"

 	"golang.org/x/sys/unix"
@@ -46,5 +46,9 @@ const (
 // Access returns a boolean indicating whether the mode being checked
 // for is valid.
 func Access(path string, mode int) error {
-	return unix.Access(path, uint32(mode))
+	// Validate the conversion to avoid potential integer overflow (gosec G115).
+	if mode < 0 || uint64(mode) > uint64(math.MaxUint32) {
+		return unix.EINVAL
+	}
+	return unix.Access(path, uint32(mode)) // #nosec G115 - handled above.
 }
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows

 // Package fileutil contains common file functions.
 package fileutil
go.mod
@@ -1,18 +1,18 @@
 module git.wntrmute.dev/kyle/goutils

-go 1.20
+go 1.24.0

 require (
 	github.com/hashicorp/go-syslog v1.0.0
 	github.com/kr/text v0.2.0
-	github.com/pkg/errors v0.9.1
 	github.com/pkg/sftp v1.12.0
-	golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b
-	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
+	golang.org/x/crypto v0.44.0
+	golang.org/x/sys v0.38.0
 	gopkg.in/yaml.v2 v2.4.0
 )

 require (
+	github.com/benbjohnson/clock v1.3.5
 	github.com/davecgh/go-spew v1.1.1
 	github.com/google/certificate-transparency-go v1.0.21
 )
@@ -20,5 +20,6 @@ require (
 require (
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/kr/pretty v0.1.0 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
 )
go.sum
@@ -1,3 +1,5 @@
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -25,14 +27,15 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU=
-golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
+golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
@@ -11,7 +11,7 @@ import (
 var progname = filepath.Base(os.Args[0])

 // ProgName returns what lib thinks the program name is, namely the
-// basename of of argv0.
+// basename of argv0.
 //
 // It is similar to the Linux __progname function.
 func ProgName() string {
@@ -1,4 +1,4 @@
-// Package syslog is a syslog-type facility for logging.
+// Package log is a syslog-type facility for logging.
 package log

 import (
@@ -17,7 +17,7 @@ type logger struct {
 	writeConsole bool
 }

-func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
+func (log *logger) printf(p gsyslog.Priority, format string, args ...any) {
 	if !strings.HasSuffix(format, "\n") {
 		format += "\n"
 	}
@@ -28,33 +28,33 @@ func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}
 	}

 	if log.l != nil {
-		log.l.WriteLevel(p, []byte(fmt.Sprintf(format, args...)))
+		_ = log.l.WriteLevel(p, fmt.Appendf(nil, format, args...))
 	}
 }

-func (log *logger) print(p gsyslog.Priority, args ...interface{}) {
+func (log *logger) print(p gsyslog.Priority, args ...any) {
 	if p <= log.p && log.writeConsole {
 		fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
 		fmt.Print(args...)
 	}

 	if log.l != nil {
-		log.l.WriteLevel(p, []byte(fmt.Sprint(args...)))
+		_ = log.l.WriteLevel(p, fmt.Append(nil, args...))
 	}
 }

-func (log *logger) println(p gsyslog.Priority, args ...interface{}) {
+func (log *logger) println(p gsyslog.Priority, args ...any) {
 	if p <= log.p && log.writeConsole {
 		fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
 		fmt.Println(args...)
 	}

 	if log.l != nil {
-		log.l.WriteLevel(p, []byte(fmt.Sprintln(args...)))
+		_ = log.l.WriteLevel(p, fmt.Appendln(nil, args...))
 	}
 }

-func (log *logger) spew(args ...interface{}) {
+func (log *logger) spew(args ...any) {
 	if log.p == gsyslog.LOG_DEBUG {
 		spew.Dump(args...)
 	}
@@ -160,109 +160,109 @@ func Setup(opts *Options) error {
 	return nil
 }

-func Debug(args ...interface{}) {
+func Debug(args ...any) {
 	log.print(gsyslog.LOG_DEBUG, args...)
 }

-func Info(args ...interface{}) {
+func Info(args ...any) {
 	log.print(gsyslog.LOG_INFO, args...)
 }

-func Notice(args ...interface{}) {
+func Notice(args ...any) {
 	log.print(gsyslog.LOG_NOTICE, args...)
 }

-func Warning(args ...interface{}) {
+func Warning(args ...any) {
 	log.print(gsyslog.LOG_WARNING, args...)
 }

-func Err(args ...interface{}) {
+func Err(args ...any) {
 	log.print(gsyslog.LOG_ERR, args...)
 }

-func Crit(args ...interface{}) {
+func Crit(args ...any) {
 	log.print(gsyslog.LOG_CRIT, args...)
 }

-func Alert(args ...interface{}) {
+func Alert(args ...any) {
 	log.print(gsyslog.LOG_ALERT, args...)
 }

-func Emerg(args ...interface{}) {
+func Emerg(args ...any) {
 	log.print(gsyslog.LOG_EMERG, args...)
 }

-func Debugln(args ...interface{}) {
+func Debugln(args ...any) {
 	log.println(gsyslog.LOG_DEBUG, args...)
 }

-func Infoln(args ...interface{}) {
+func Infoln(args ...any) {
 	log.println(gsyslog.LOG_INFO, args...)
 }

-func Noticeln(args ...interface{}) {
+func Noticeln(args ...any) {
 	log.println(gsyslog.LOG_NOTICE, args...)
 }

-func Warningln(args ...interface{}) {
+func Warningln(args ...any) {
 	log.print(gsyslog.LOG_WARNING, args...)
 }

-func Errln(args ...interface{}) {
+func Errln(args ...any) {
 	log.println(gsyslog.LOG_ERR, args...)
 }

-func Critln(args ...interface{}) {
+func Critln(args ...any) {
 	log.println(gsyslog.LOG_CRIT, args...)
 }

-func Alertln(args ...interface{}) {
+func Alertln(args ...any) {
 	log.println(gsyslog.LOG_ALERT, args...)
 }

-func Emergln(args ...interface{}) {
+func Emergln(args ...any) {
 	log.println(gsyslog.LOG_EMERG, args...)
 }

-func Debugf(format string, args ...interface{}) {
+func Debugf(format string, args ...any) {
 	log.printf(gsyslog.LOG_DEBUG, format, args...)
 }

-func Infof(format string, args ...interface{}) {
+func Infof(format string, args ...any) {
 	log.printf(gsyslog.LOG_INFO, format, args...)
 }

-func Noticef(format string, args ...interface{}) {
+func Noticef(format string, args ...any) {
 	log.printf(gsyslog.LOG_NOTICE, format, args...)
 }

-func Warningf(format string, args ...interface{}) {
+func Warningf(format string, args ...any) {
 	log.printf(gsyslog.LOG_WARNING, format, args...)
 }

-func Errf(format string, args ...interface{}) {
+func Errf(format string, args ...any) {
 	log.printf(gsyslog.LOG_ERR, format, args...)
 }

-func Critf(format string, args ...interface{}) {
+func Critf(format string, args ...any) {
 	log.printf(gsyslog.LOG_CRIT, format, args...)
 }

-func Alertf(format string, args ...interface{}) {
+func Alertf(format string, args ...any) {
 	log.printf(gsyslog.LOG_ALERT, format, args...)
 }

-func Emergf(format string, args ...interface{}) {
+func Emergf(format string, args ...any) {
 	log.printf(gsyslog.LOG_EMERG, format, args...)
 	os.Exit(1)
 }

-func Fatal(args ...interface{}) {
+func Fatal(args ...any) {
 	log.println(gsyslog.LOG_ERR, args...)
 	os.Exit(1)
 }

-func Fatalf(format string, args ...interface{}) {
+func Fatalf(format string, args ...any) {
 	log.printf(gsyslog.LOG_ERR, format, args...)
 	os.Exit(1)
 }
@@ -279,7 +279,7 @@ func FatalError(err error, message string) {
 }

 // Spew will pretty print the args if the logger is set to DEBUG priority.
-func Spew(args ...interface{}) {
+func Spew(args ...any) {
 	log.spew(args...)
 }

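
Note: fmt.Appendf, fmt.Append and fmt.Appendln (available since Go 1.19) format straight into a byte slice, so the WriteLevel calls above avoid the intermediate string that []byte(fmt.Sprintf(...)) allocates. A small sketch of the equivalence, independent of this repository:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        // Both produce the same bytes; Appendf formats directly into a slice
        // and skips the string that Sprintf would allocate first.
        a := []byte(fmt.Sprintf("%s=%d\n", "count", 42))
        b := fmt.Appendf(nil, "%s=%d\n", "count", 42)
        fmt.Println(bytes.Equal(a, b)) // true
    }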
@@ -11,5 +11,4 @@
 // This produces the output message
 //
 // [2016-04-01T15:04:30-0700] [ERROR] [actor:serialiser event:failed to open file] error=is a directory path=data.bin
-//
 package logging
@@ -25,8 +25,8 @@ func main() {

 	log.Info("example", "filelog test", nil)
 	exampleNewFromFile()
-	os.Remove("example.log")
-	os.Remove("example.err")
+	_ = os.Remove("example.log")
+	_ = os.Remove("example.err")
 }

 func exampleNewFromFile() {
@@ -23,7 +23,7 @@ func Example() {
 		map[string]string{"when": time.Now().String()})
 }

-func ExampleNewFromFile() {
+func ExampleNewSplitFile() {
 	flog, err := logging.NewSplitFile("example.log", "example.err", true)
 	if err != nil {
 		log.Fatal("filelog", "failed to open logger",
@@ -1,6 +1,9 @@
 package logging

-import "os"
+import (
+	"fmt"
+	"os"
+)

 // File writes its logs to file.
 type File struct {
@@ -8,22 +11,6 @@ type File struct {
 	*LogWriter
 }

-// Close calls close on the underlying log files.
-func (fl *File) Close() error {
-	if fl.fo != nil {
-		if err := fl.fo.Close(); err != nil {
-			return err
-		}
-		fl.fo = nil
-	}
-
-	if fl.fe != nil {
-		return fl.fe.Close()
-	}
-
-	return nil
-}
-
 // NewFile creates a new Logger that writes all logs to the file
 // specified by path. If overwrite is specified, the log file will be
 // truncated before writing. Otherwise, the log file will be appended
@@ -36,7 +23,7 @@ func NewFile(path string, overwrite bool) (*File, error) {
 	if overwrite {
 		fl.fo, err = os.Create(path)
 	} else {
-		fl.fo, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0644)
+		fl.fo, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0600) // #nosec G302
 	}

 	if err != nil {
@@ -59,7 +46,7 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
 	if overwrite {
 		fl.fo, err = os.Create(outpath)
 	} else {
-		fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
+		fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
 	}

 	if err != nil {
@@ -69,14 +56,51 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
 	if overwrite {
 		fl.fe, err = os.Create(errpath)
 	} else {
-		fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
+		fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
 	}

 	if err != nil {
-		fl.Close()
+		if closeErr := fl.Close(); closeErr != nil {
+			return nil, fmt.Errorf("failed to open error log: cleanup close failed: %v: %w", closeErr, err)
+		}
 		return nil, err
 	}

 	fl.LogWriter = NewLogWriter(fl.fo, fl.fe)
 	return fl, nil
 }

+// Close calls close on the underlying log files.
+func (fl *File) Close() error {
+	if fl.fo != nil {
+		if err := fl.fo.Close(); err != nil {
+			return err
+		}
+		fl.fo = nil
+	}
+
+	if fl.fe != nil {
+		return fl.fe.Close()
+	}
+
+	return nil
+}
+
+func (fl *File) Flush() error {
+	if err := fl.fo.Sync(); err != nil {
+		return err
+	}
+	return fl.fe.Sync()
+}
+
+func (fl *File) Chmod(mode os.FileMode) error {
+	if err := fl.fo.Chmod(mode); err != nil {
+		return fmt.Errorf("failed to chmod output log: %w", err)
+	}
+
+	if err := fl.fe.Chmod(mode); err != nil {
+		return fmt.Errorf("failed to chmod error log: %w", err)
+	}
+
+	return nil
+}
@@ -32,31 +32,6 @@ const (
 // DefaultLevel is the default logging level when none is provided.
 const DefaultLevel = LevelInfo

-// Cheap integer to fixed-width decimal ASCII. Give a negative width
-// to avoid zero-padding. (From log/log.go in the standard library).
-func itoa(i int, wid int) string {
-	// Assemble decimal in reverse order.
-	var b [20]byte
-	bp := len(b) - 1
-	for i >= 10 || wid > 1 {
-		wid--
-		q := i / 10
-		b[bp] = byte('0' + i - q*10)
-		bp--
-		i = q
-	}
-	// i < 10
-	b[bp] = byte('0' + i)
-	return string(b[bp:])
-}
-
-func writeToOut(level Level) bool {
-	if level < LevelWarning {
-		return true
-	}
-	return false
-}
-
 var levelPrefix = [...]string{
 	LevelDebug: "DEBUG",
 	LevelInfo:  "INFO",
@@ -1,6 +1,7 @@
 package logging

 import (
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -14,7 +15,7 @@ import (
 // 1. The **level** attaches a notion of priority to the log message.
 //    Several log levels are available:
 //
-//    + FATAL (32): the system is in an unsuable state, and cannot
+//    + FATAL (32): the system is in an unusable state and cannot
 //      continue to run. Most of the logging for this will cause the
 //      program to exit with an error code.
 //    + CRITICAL (16): critical conditions. The error, if uncorrected, is
@@ -34,9 +35,9 @@ import (
 //      typically much higher than errors). For example, repeated
 //      warnings might be a sign that the system is under attack.
 //    + INFO (2): informational message. This is a normal log message
-//      that is used to deliver information, such as recording
-//      requests. Ops teams are never paged for informational
-//      messages. This is the default log level.
+//      used to deliver information, such as recording requests. Ops
+//      teams are never paged for informational messages. This is the
+//      default log level.
 //    + DEBUG (1): debug-level message. These are only used during
 //      development or if a deployed system repeatedly sees abnormal
 //      errors.
@@ -65,10 +66,10 @@ import (
 //
 // will cover the header:
 //
-//   + ``$1`` contains the timestamp
-//   + ``$2`` contains the level
-//   + ``$3`` contains the actor
-//   + ``$4`` contains the event
+//   + “$1“ contains the timestamp
+//   + “$2“ contains the level
+//   + “$3“ contains the actor
+//   + “$4“ contains the event.
 type Logger interface {
 	// SetLevel sets the minimum log level.
 	SetLevel(Level)
@@ -131,7 +132,7 @@ func (lw *LogWriter) output(w io.Writer, lvl Level, actor, event string, attrs m
 }

 // Debug emits a debug-level message. These are only used during
-// development or if a deployed system repeatedly sees abnormal
+// development, or if a deployed system repeatedly sees abnormal
 // errors.
 //
 // Actor specifies the component emitting the message; event indicates
@@ -213,7 +214,7 @@ func (lw *LogWriter) Critical(actor, event string, attrs map[string]string) {
 	lw.output(lw.we, LevelCritical, actor, event, attrs)
 }

-// Fatal emits a message indicating that the system is in an unsuable
+// Fatal emits a message indicating that the system is in an unusable
 // state, and cannot continue to run. The program will exit with exit
 // code 1.
 //
@@ -229,9 +230,9 @@ func (lw *LogWriter) Fatal(actor, event string, attrs map[string]string) {
 	os.Exit(1)
 }

-// FatalCode emits a message indicating that the system is in an unsuable
+// FatalCode emits a message indicating that the system is in an unusable
 // state, and cannot continue to run. The program will exit with the
-// exit code speicfied in the exitcode argument.
+// exit code specified in the exitcode argument.
 //
 // Actor specifies the component emitting the message; event indicates
 // the event that caused the log message to be emitted. attrs is a map
@@ -245,7 +246,7 @@ func (lw *LogWriter) FatalCode(exitcode int, actor, event string, attrs map[stri
 	os.Exit(exitcode)
 }

-// FatalNoDie emits a message indicating that the system is in an unsuable
+// FatalNoDie emits a message indicating that the system is in an unusable
 // state, and cannot continue to run. The program will not exit; it is
 // assumed that the caller has some final clean up to perform.
 //
@@ -314,12 +315,18 @@ func (m *Multi) Status() error {
 }

 func (m *Multi) Close() error {
+	var errs []error
 	for _, l := range m.loggers {
-		l.Close()
+		if err := l.Close(); err != nil {
+			errs = append(errs, err)
+		}
 	}

+	if len(errs) == 0 {
 		return nil
 	}
+	return errors.Join(errs...)
+}

 func (m *Multi) Debug(actor, event string, attrs map[string]string) {
 	for _, l := range m.loggers {
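
Note: errors.Join (Go 1.20+) returns nil when every argument is nil and otherwise wraps all non-nil errors into a single value that errors.Is can still match against, which is what makes it a good fit for aggregating the per-logger Close errors above. A minimal sketch, independent of this repository:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        // nil arguments are dropped; the remaining errors are joined.
        err := errors.Join(nil, os.ErrNotExist, errors.New("close failed"))

        fmt.Println(errors.Is(err, os.ErrNotExist)) // true: Is checks each joined error
        fmt.Println(err)                            // messages are joined by newlines
    }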
@@ -1,30 +1,32 @@
-package logging
+package logging_test

 import (
 	"bytes"
 	"fmt"
 	"os"
 	"testing"
+
+	"git.wntrmute.dev/kyle/goutils/logging"
 )

 // A list of implementations that should be tested.
-var implementations []Logger
+var implementations []logging.Logger

 func init() {
-	lw := NewLogWriter(&bytes.Buffer{}, nil)
-	cw := NewConsole()
+	lw := logging.NewLogWriter(&bytes.Buffer{}, nil)
+	cw := logging.NewConsole()

 	implementations = append(implementations, lw)
 	implementations = append(implementations, cw)
 }

 func TestFileSetup(t *testing.T) {
-	fw1, err := NewFile("fw1.log", true)
+	fw1, err := logging.NewFile("fw1.log", true)
 	if err != nil {
 		t.Fatalf("failed to create new file logger: %v", err)
 	}

-	fw2, err := NewSplitFile("fw2.log", "fw2.err", true)
+	fw2, err := logging.NewSplitFile("fw2.log", "fw2.err", true)
 	if err != nil {
 		t.Fatalf("failed to create new split file logger: %v", err)
 	}
@@ -33,7 +35,7 @@ func TestFileSetup(t *testing.T) {
 	implementations = append(implementations, fw2)
 }

-func TestImplementations(t *testing.T) {
+func TestImplementations(_ *testing.T) {
 	for _, l := range implementations {
 		l.Info("TestImplementations", "Info message",
 			map[string]string{"type": fmt.Sprintf("%T", l)})
@@ -44,20 +46,30 @@ func TestImplementations(t *testing.T) {

 func TestCloseLoggers(t *testing.T) {
 	for _, l := range implementations {
-		l.Close()
+		if err := l.Close(); err != nil {
+			t.Errorf("failed to close logger: %v", err)
+		}
 	}
 }

 func TestDestroyLogFiles(t *testing.T) {
-	os.Remove("fw1.log")
-	os.Remove("fw2.log")
-	os.Remove("fw2.err")
+	if err := os.Remove("fw1.log"); err != nil {
+		t.Errorf("failed to remove fw1.log: %v", err)
+	}
+
+	if err := os.Remove("fw2.log"); err != nil {
+		t.Errorf("failed to remove fw2.log: %v", err)
+	}
+
+	if err := os.Remove("fw2.err"); err != nil {
+		t.Errorf("failed to remove fw2.err: %v", err)
+	}
 }

 func TestMulti(t *testing.T) {
-	c1 := NewConsole()
-	c2 := NewConsole()
-	m := NewMulti(c1, c2)
+	c1 := logging.NewConsole()
+	c2 := logging.NewConsole()
+	m := logging.NewMulti(c1, c2)
 	if !m.Good() {
 		t.Fatal("failed to set up multi logger")
 	}
@@ -8,15 +8,15 @@ type mwc struct {
 }

 // Write implements the Writer interface.
-func (t *mwc) Write(p []byte) (n int, err error) {
+func (t *mwc) Write(p []byte) (int, error) {
 	for _, w := range t.wcs {
-		n, err = w.Write(p)
+		n, err := w.Write(p)
 		if err != nil {
-			return
+			return n, err
 		}
 		if n != len(p) {
 			err = io.ErrShortWrite
-			return
+			return n, err
 		}
 	}
 	return len(p), nil
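
Note: the io.Writer contract says a writer must return a non-nil error whenever it writes fewer than len(p) bytes, so a writer that silently reports a short count has to be surfaced explicitly; that is what the io.ErrShortWrite branch above does. A hedged sketch of the same check in caller code (writeAll is a hypothetical helper, not part of this repository):

    package main

    import (
        "errors"
        "fmt"
        "io"
        "strings"
    )

    func writeAll(w io.Writer, p []byte) error {
        n, err := w.Write(p)
        if err == nil && n < len(p) {
            // Convert a silent short write into an explicit error.
            err = io.ErrShortWrite
        }
        return err
    }

    func main() {
        var sb strings.Builder
        err := writeAll(&sb, []byte("hello, world"))
        fmt.Println(errors.Is(err, io.ErrShortWrite)) // false: Builder accepts everything
    }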
@@ -1,10 +1,11 @@
-package mwc
+package mwc_test

 import (
 	"bytes"
 	"testing"

 	"git.wntrmute.dev/kyle/goutils/assert"
+	"git.wntrmute.dev/kyle/goutils/mwc"
 	"git.wntrmute.dev/kyle/goutils/testio"
 )

@@ -12,7 +13,7 @@ func TestMWC(t *testing.T) {
 	buf1 := testio.NewBufCloser(nil)
 	buf2 := testio.NewBufCloser(nil)

-	mwc := MultiWriteCloser(buf1, buf2)
+	mwc := mwc.MultiWriteCloser(buf1, buf2)

 	_, err := mwc.Write([]byte("hello, world"))
 	assert.NoErrorT(t, err)
@@ -30,15 +31,15 @@ func TestMWCShort(t *testing.T) {
 	buf3 := testio.NewBrokenWriter(5)
 	buf4 := testio.NewSilentBrokenWriter(5)

-	mwc := MultiWriteCloser(buf1, buf2, buf3)
-	defer mwc.Close()
+	multiWriter := mwc.MultiWriteCloser(buf1, buf2, buf3)
+	defer multiWriter.Close()

-	_, err := mwc.Write([]byte("hello, world"))
+	_, err := multiWriter.Write([]byte("hello, world"))
 	assert.ErrorT(t, err, "expected a short write error", "but no error occurred")
-	mwc.Close()
+	multiWriter.Close()

-	mwc = MultiWriteCloser(buf1, buf2, buf4)
-	_, err = mwc.Write([]byte("hello, world"))
+	multiWriter = mwc.MultiWriteCloser(buf1, buf2, buf4)
+	_, err = multiWriter.Write([]byte("hello, world"))
 	assert.ErrorT(t, err, "expected a short write error", "but no error occurred")
 }

@@ -47,7 +48,7 @@ func TestMWCClose(t *testing.T) {
 	buf2 := testio.NewBufCloser(nil)
 	buf3 := testio.NewBrokenCloser(nil)

-	mwc := MultiWriteCloser(buf1, buf2, buf3)
+	mwc := mwc.MultiWriteCloser(buf1, buf2, buf3)
 	_, err := mwc.Write([]byte("hello, world"))
 	assert.NoErrorT(t, err)

rand/rand.go
@@ -1,49 +0,0 @@
-// Package rand contains utilities for interacting with math/rand, including
-// seeding from a random sed.
-package rand
-
-import (
-	"crypto/rand"
-	"encoding/binary"
-	mrand "math/rand"
-)
-
-// CryptoUint64 generates a cryptographically-secure 64-bit integer.
-func CryptoUint64() (uint64, error) {
-	bs := make([]byte, 8)
-	_, err := rand.Read(bs)
-	if err != nil {
-		return 0, err
-	}
-
-	return binary.BigEndian.Uint64(bs), nil
-}
-
-// Seed initialises the non-cryptographic PRNG with a random,
-// cryptographically secure value. This is done just as a good
-// way to make this random. The returned 64-bit value is the seed.
-func Seed() (uint64, error) {
-	seed, err := CryptoUint64()
-	if err != nil {
-		return 0, err
-	}
-
-	// NB: this is permitted.
-	mrand.Seed(int64(seed))
-	return seed, nil
-}
-
-// Int is a wrapper for math.Int so only one package needs to be imported.
-func Int() int {
-	return mrand.Int()
-}
-
-// Intn is a wrapper for math.Intn so only one package needs to be imported.
-func Intn(max int) int {
-	return mrand.Intn(max)
-}
-
-// Intn2 returns a random value between min and max, inclusive.
-func Intn2(min, max int) int {
-	return Intn(max-min) + min
-}
@@ -1,74 +0,0 @@
-package rand
-
-import (
-	"fmt"
-	mrand "math/rand"
-	"testing"
-)
-
-func TestCryptoUint64(t *testing.T) {
-	n1, err := CryptoUint64()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	n2, err := CryptoUint64()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// This has such a low chance of occurring that it's likely to be
-	// indicative of a bad CSPRNG.
-	if n1 == n2 {
-		t.Fatalf("repeated random uint64s: %d", n1)
-	}
-}
-
-func TestIntn(t *testing.T) {
-	expected := []int{3081, 4887, 4847, 1059, 3081}
-	mrand.Seed(1)
-	for i := 0; i < 5; i++ {
-		n := Intn2(1000, 5000)
-
-		if n != expected[i] {
-			fmt.Printf("invalid sequence at %d: expected %d, have %d", i, expected[i], n)
-		}
-	}
-}
-
-func TestSeed(t *testing.T) {
-	seed1, err := Seed()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	var seed2 uint64
-	n1 := Int()
-	tries := 0
-
-	for {
-		seed2, err = Seed()
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if seed1 != seed2 {
-			break
-		}
-
-		tries++
-
-		if tries > 3 {
-			t.Fatal("can't generate two unique seeds")
-		}
-	}
-
-	n2 := Int()
-
-	// Again, this not impossible, merely statistically improbably and a
-	// potential canary for RNG issues.
-	if n1 == n2 {
-		t.Fatalf("repeated integers fresh from two unique seeds: %d/%d -> %d",
-			seed1, seed2, n1)
-	}
-}
sbuf/sbuf.go
@@ -15,7 +15,7 @@ func zero(in []byte, n int) {
 		stop = len(in)
 	}

-	for i := 0; i < stop; i++ {
+	for i := range stop {
 		in[i] ^= in[i]
 	}
 }
@@ -37,7 +37,10 @@ func NewBuffer(n int) *Buffer {
 // original data will be wiped.
 func NewBufferFrom(p []byte) *Buffer {
 	buf := NewBuffer(len(p))
-	buf.Write(p)
+	_, err := buf.Write(p)
+	if err != nil {
+		return nil
+	}
 	zero(p, len(p))
 	return buf
 }
@@ -54,10 +57,7 @@ func (buf *Buffer) Read(p []byte) (int, error) {
 		return 0, io.EOF
 	}

-	copyLength := len(p)
-	if copyLength > len(buf.buf) {
-		copyLength = len(buf.buf)
-	}
+	copyLength := min(len(p), len(buf.buf))

 	copy(p, buf.buf)
 	zero(buf.buf, len(p))
@@ -91,10 +91,7 @@ func (buf *Buffer) Write(p []byte) (int, error) {
 	r := len(buf.buf) + len(p)
 	if cap(buf.buf) < r {
 		l := r
-		for {
-			if l > r {
-				break
-			}
+		for l <= r {
 			l *= 2
 		}
 		buf.grow(l - cap(buf.buf))
@@ -107,7 +104,7 @@ func (buf *Buffer) Write(p []byte) (int, error) {
 func (buf *Buffer) WriteByte(c byte) error {
 	r := len(buf.buf) + 1
 	if cap(buf.buf) < r {
-		l := r * 2
+		l := r << 1
 		buf.grow(l - cap(buf.buf))
 	}
 	buf.buf = append(buf.buf, c)
@@ -138,7 +135,7 @@ func (buf *Buffer) Bytes() []byte {
 	}

 	p := make([]byte, buf.Len())
-	buf.Read(p)
+	_, _ = buf.Read(p)
 	buf.Close()
 	return p
 }
|||||||
@@ -1,15 +1,16 @@
|
|||||||
package sbuf
|
package sbuf_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/kyle/goutils/sbuf"
|
||||||
"golang.org/x/crypto/nacl/box"
|
"golang.org/x/crypto/nacl/box"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
buf = &Buffer{}
|
buf = &sbuf.Buffer{}
|
||||||
testMessage1 = []byte("round and round and round we go, where we stop, no one knows")
|
testMessage1 = []byte("round and round and round we go, where we stop, no one knows")
|
||||||
testMessage2 = []byte("the deconstruction of falling stars")
|
testMessage2 = []byte("the deconstruction of falling stars")
|
||||||
)
|
)
|
||||||
@@ -113,23 +114,23 @@ func TestShortRead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewBuffer(t *testing.T) {
|
func TestNewBuffer(t *testing.T) {
|
||||||
buf := NewBuffer(32)
|
testBuffer := sbuf.NewBuffer(32)
|
||||||
if len(buf.buf) != 0 {
|
if testBuffer.Len() != 0 {
|
||||||
t.Fatalf("expected new buffer length to be 0, have %d",
|
t.Fatalf("expected new buffer length to be 0, have %d",
|
||||||
len(buf.buf))
|
testBuffer.Len())
|
||||||
}
|
}
|
||||||
|
|
||||||
if cap(buf.buf) != 32 {
|
if testBuffer.Cap() != 32 {
|
||||||
t.Fatalf("expected new buffer capacity to be 0, have %d",
|
t.Fatalf("expected new buffer capacity to be 0, have %d",
|
||||||
cap(buf.buf))
|
testBuffer.Cap())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewBufferFrom(t *testing.T) {
|
func TestNewBufferFrom(t *testing.T) {
|
||||||
p := make([]byte, len(testMessage1))
|
p := make([]byte, len(testMessage1))
|
||||||
copy(p, testMessage1)
|
copy(p, testMessage1)
|
||||||
buf := NewBufferFrom(p)
|
testBuffer := sbuf.NewBufferFrom(p)
|
||||||
if !bytes.Equal(buf.buf, testMessage1) {
|
if !bytes.Equal(testBuffer.Bytes(), testMessage1) {
|
||||||
t.Fatal("new buffer wasn't constructed properly")
|
t.Fatal("new buffer wasn't constructed properly")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -137,10 +138,10 @@ func TestNewBufferFrom(t *testing.T) {
|
|||||||
func TestBytes(t *testing.T) {
|
func TestBytes(t *testing.T) {
|
||||||
p := make([]byte, len(testMessage1))
|
p := make([]byte, len(testMessage1))
|
||||||
copy(p, testMessage1)
|
copy(p, testMessage1)
|
||||||
buf := NewBufferFrom(p)
|
testBuffer := sbuf.NewBufferFrom(p)
|
||||||
|
|
||||||
out := buf.Bytes()
|
out := testBuffer.Bytes()
|
||||||
if buf.buf != nil {
|
if testBuffer.Len() != 0 {
|
||||||
t.Fatal("buffer was not closed")
|
t.Fatal("buffer was not closed")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,21 +149,21 @@ func TestBytes(t *testing.T) {
 		t.Fatal("buffer did not return the right data")
 	}

-	out = buf.Bytes()
+	out = testBuffer.Bytes()
 	if out != nil {
 		t.Fatal("a closed buffer should return nil for Bytes")
 	}
 }

 func TestRWByte(t *testing.T) {
-	buf := NewBuffer(0)
+	testBuffer := sbuf.NewBuffer(0)
 	c := byte(42)
-	err := buf.WriteByte(c)
+	err := testBuffer.WriteByte(c)
 	if err != nil {
 		t.Fatalf("%v", err)
 	}

-	c, err = buf.ReadByte()
+	c, err = testBuffer.ReadByte()
 	if err != nil {
 		t.Fatalf("%v", err)
 	}
@@ -171,22 +172,21 @@ func TestRWByte(t *testing.T) {
 		t.Fatalf("Expected 42, have %d", c)
 	}

-	_, err = buf.ReadByte()
+	_, err = testBuffer.ReadByte()
 	if err == nil {
 		t.Fatal("Expected EOF")
 	}
 }

 func BenchmarkRead(b *testing.B) {
-	b.N = 2000
 	pub, priv, err := box.GenerateKey(rand.Reader)
 	if err != nil {
 		b.Fatalf("%v", err)
 	}
 	b.ReportAllocs()

-	for i := 0; i < b.N; i++ {
-		_, err := buf.Write(priv[:])
+	for b.Loop() {
+		_, err = buf.Write(priv[:])
 		if err != nil {
 			b.Fatalf("%v", err)
 		}
@@ -204,11 +204,11 @@ func BenchmarkFixed(b *testing.B) {
 		b.Fatalf("%v", err)
 	}

-	buf = NewBuffer(64 * b.N)
+	buf = sbuf.NewBuffer(64 * b.N)
 	b.ReportAllocs()

-	for i := 0; i < b.N; i++ {
-		_, err := buf.Write(priv[:])
+	for b.Loop() {
+		_, err = buf.Write(priv[:])
 		if err != nil {
 			b.Fatalf("%v", err)
 		}
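Both benchmarks switch from a hand-rolled `for i := 0; i < b.N; i++` loop (and, in BenchmarkRead, from assigning b.N directly) to b.Loop, the benchmark loop helper added to testing.B in Go 1.24; the framework now decides how many iterations to run. A minimal self-contained sketch of the pattern, with a purely illustrative workload:

package example_test

import "testing"

func work() { _ = make([]byte, 64) } // stand-in workload, not part of goutils

func BenchmarkWork(b *testing.B) {
	b.ReportAllocs()
	// b.Loop returns true until the framework has gathered enough samples,
	// so there is no need to touch b.N or count iterations by hand.
	for b.Loop() {
		work()
	}
}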
@@ -1,18 +1,20 @@
-package seekbuf
+package seekbuf_test

 import (
 	"fmt"
 	"testing"

 	"git.wntrmute.dev/kyle/goutils/assert"
+	"git.wntrmute.dev/kyle/goutils/seekbuf"
 )

 func TestSeeking(t *testing.T) {
 	partA := []byte("hello, ")
 	partB := []byte("world!")

-	buf := New(partA)
-	assert.BoolT(t, buf.Len() == len(partA), fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA)))
+	buf := seekbuf.New(partA)
+	assert.BoolT(t, buf.Len() == len(partA),
+		fmt.Sprintf("on init: have length %d, want length %d", buf.Len(), len(partA)))

 	b := make([]byte, 32)

@@ -32,7 +34,8 @@ func TestSeeking(t *testing.T) {

 	partsLen := len(partA) + len(partB)
 	buf.Rewind()
-	assert.BoolT(t, buf.Len() == partsLen, fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen))
+	assert.BoolT(t, buf.Len() == partsLen,
+		fmt.Sprintf("after rewinding: have length %d, want length %d", buf.Len(), partsLen))

 	buf.Close()
 	assert.BoolT(t, buf.Len() == 0, fmt.Sprintf("after closing, have length %d, want length 0", buf.Len()))
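For readers who have not seen the package under test: seekbuf is a buffer whose read pointer can be moved, so Rewind makes already-read data readable again (and count toward Len again), while Close empties the buffer. A small hedged sketch, assuming the seekbuf behaviour suggested by this test (New, Read, Rewind, Len, Close):

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/seekbuf"
)

func main() {
	buf := seekbuf.New([]byte("hello, world!"))

	p := make([]byte, 5)
	_, _ = buf.Read(p)     // consume "hello"
	fmt.Println(buf.Len()) // fewer bytes remain (8, if Len tracks the unread portion)

	buf.Rewind()           // read pointer back to the start
	fmt.Println(buf.Len()) // everything is readable again: 13

	buf.Close()
	fmt.Println(buf.Len()) // 0 after closing
}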
tee/tee.go (48 changed lines)
@@ -17,23 +17,6 @@ type Tee struct {
 	Verbose bool
 }

-func (t *Tee) Write(p []byte) (int, error) {
-	n, err := os.Stdout.Write(p)
-	if err != nil {
-		return n, err
-	}
-
-	if t.f != nil {
-		return t.f.Write(p)
-	}
-	return n, nil
-}
-
-// Close calls Close on the underlying file.
-func (t *Tee) Close() error {
-	return t.f.Close()
-}
-
 // NewOut writes to standard output only. The file is created, not
 // appended to.
 func NewOut(logFile string) (*Tee, error) {
@@ -48,9 +31,32 @@ func NewOut(logFile string) (*Tee, error) {
 	return &Tee{f: f}, nil
 }

+func (t *Tee) Write(p []byte) (int, error) {
+	n, err := os.Stdout.Write(p)
+	if err != nil {
+		return n, err
+	}
+
+	if t.f != nil {
+		return t.f.Write(p)
+	}
+	return n, nil
+}
+
+// Close calls Close on the underlying file if present.
+// It is safe to call Close on a Tee with no file; in that case, it returns nil.
+func (t *Tee) Close() error {
+	if t == nil || t.f == nil {
+		return nil
+	}
+	err := t.f.Close()
+	t.f = nil
+	return err
+}
+
 // Printf formats according to a format specifier and writes to the
 // tee instance.
-func (t *Tee) Printf(format string, args ...interface{}) (int, error) {
+func (t *Tee) Printf(format string, args ...any) (int, error) {
 	s := fmt.Sprintf(format, args...)
 	n, err := os.Stdout.WriteString(s)
 	if err != nil {
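Besides moving Write and Close below the constructors, the rewritten Close changes its contract: it now tolerates a Tee with no file behind it (the stdout-only case built by NewOut("") or a zero-value Tee) and nils the handle so a second Close is also a no-op, whereas the old version simply forwarded to t.f.Close(), which fails when no file was ever opened. A short sketch of why that matters, assuming the tee package exactly as shown in this diff:

package main

import (
	"log"

	"git.wntrmute.dev/kyle/goutils/tee"
)

func main() {
	t, err := tee.NewOut("") // stdout only, no log file behind the Tee
	if err != nil {
		log.Fatal(err)
	}
	_, _ = t.Write([]byte("hello\n"))

	// With the guarded Close this returns nil even though no file exists,
	// and calling it a second time is equally harmless.
	_ = t.Close()
	_ = t.Close()
}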
@@ -66,7 +72,7 @@ func (t *Tee) Printf(format string, args ...interface{}) (int, error) {

 // VPrintf is a variant of Printf that only prints if the Tee's
 // Verbose flag is set.
-func (t *Tee) VPrintf(format string, args ...interface{}) (int, error) {
+func (t *Tee) VPrintf(format string, args ...any) (int, error) {
 	if t.Verbose {
 		return t.Printf(format, args...)
 	}
@@ -87,12 +93,12 @@ func Open(logFile string) error {

 // Printf formats according to a format specifier and writes to the
 // global tee.
-func Printf(format string, args ...interface{}) (int, error) {
+func Printf(format string, args ...any) (int, error) {
 	return globalTee.Printf(format, args...)
 }

 // VPrintf calls VPrintf on the global tee instance.
-func VPrintf(format string, args ...interface{}) (int, error) {
+func VPrintf(format string, args ...any) (int, error) {
 	return globalTee.VPrintf(format, args...)
 }

tee/tee_test.go (new file, 197 lines)
@@ -0,0 +1,197 @@
+package tee_test
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+
+	tee "git.wntrmute.dev/kyle/goutils/tee"
+)
+
+// captureStdout redirects os.Stdout for the duration of fn and returns what was written.
+func captureStdout(t *testing.T, fn func()) string {
+	t.Helper()
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatalf("pipe: %v", err)
+	}
+	old := os.Stdout
+	os.Stdout = w
+	defer func() { os.Stdout = old }()
+
+	fn()
+
+	// Close writer to unblock reader and restore stdout
+	_ = w.Close()
+	b, _ := io.ReadAll(r)
+	_ = r.Close()
+	return string(b)
+}
+
+func TestNewOutEmpty_WritesToStdoutOnly(t *testing.T) {
+	teeInst, err := tee.NewOut("")
+	if err != nil {
+		t.Fatalf("NewOut: %v", err)
+	}
+
+	out := captureStdout(t, func() {
+		var n int
+		if n, err = teeInst.Write([]byte("abc")); err != nil || n != 3 {
+			t.Fatalf("Write got n=%d err=%v", n, err)
+		}
+
+		if n, err = teeInst.Printf("-%d-", 7); err != nil || n != len("-7-") {
+			t.Fatalf("Printf got n=%d err=%v", n, err)
+		}
+	})
+
+	if out != "abc-7-" {
+		t.Fatalf("stdout = %q, want %q", out, "abc-7-")
+	}
+}
+
+func TestNewOutWithFile_WritesToBoth(t *testing.T) {
+	dir := t.TempDir()
+	logPath := filepath.Join(dir, "log.txt")
+
+	teeInst, err := tee.NewOut(logPath)
+	if err != nil {
+		t.Fatalf("NewOut: %v", err)
+	}
+	defer func() { _ = teeInst.Close() }()
+
+	out := captureStdout(t, func() {
+		if _, err = teeInst.Write([]byte("x")); err != nil {
+			t.Fatalf("Write: %v", err)
+		}
+		if _, err = teeInst.Printf("%s", "y"); err != nil {
+			t.Fatalf("Printf: %v", err)
+		}
+	})
+
+	if out != "xy" {
+		t.Fatalf("stdout = %q, want %q", out, "xy")
+	}
+
+	// Close to flush and release the file before reading
+	if err = teeInst.Close(); err != nil {
+		t.Fatalf("Close: %v", err)
+	}
+
+	data, err := os.ReadFile(logPath)
+	if err != nil {
+		t.Fatalf("ReadFile: %v", err)
+	}
+	if string(data) != "xy" {
+		t.Fatalf("file content = %q, want %q", string(data), "xy")
+	}
+}
+
+func TestVPrintf_VerboseToggle(t *testing.T) {
+	teeInst := &tee.Tee{} // stdout only
+
+	out := captureStdout(t, func() {
+		if n, err := teeInst.VPrintf("hello"); err != nil || n != 0 {
+			t.Fatalf("VPrintf (quiet) got n=%d err=%v", n, err)
+		}
+	})
+	if out != "" {
+		t.Fatalf("stdout = %q, want empty when not verbose", out)
+	}
+
+	teeInst.Verbose = true
+	out = captureStdout(t, func() {
+		if n, err := teeInst.VPrintf("%s", "hello"); err != nil || n != len("hello") {
+			t.Fatalf("VPrintf (verbose) got n=%d err=%v", n, err)
+		}
+	})
+	if out != "hello" {
+		t.Fatalf("stdout = %q, want %q", out, "hello")
+	}
+}
+
+func TestWrite_StdoutErrorDoesNotWriteToFile(t *testing.T) {
+	dir := t.TempDir()
+	logPath := filepath.Join(dir, "log.txt")
+	teeInst, err := tee.NewOut(logPath)
+	if err != nil {
+		t.Fatalf("NewOut: %v", err)
+	}
+	defer func() { _ = teeInst.Close() }()
+
+	// Replace stdout with a closed pipe writer to force write error.
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatalf("pipe: %v", err)
+	}
+	old := os.Stdout
+	os.Stdout = w
+	_ = w.Close() // immediately close to cause EPIPE on write
+	defer func() {
+		os.Stdout = old
+		_ = r.Close()
+	}()
+
+	var n int
+	if n, err = teeInst.Write([]byte("abc")); err == nil {
+		t.Fatalf("expected error writing to closed stdout, got n=%d err=nil", n)
+	}
+
+	// Ensure file remained empty because stdout write failed first.
+	_ = teeInst.Close()
+	data, err := os.ReadFile(logPath)
+	if err != nil {
+		t.Fatalf("ReadFile: %v", err)
+	}
+	if len(data) != 0 {
+		t.Fatalf("file content = %q, want empty due to stdout failure", string(data))
+	}
+}
+
+func TestGlobal_OpenPrintfVPrintfClose(t *testing.T) {
+	// Ensure a clean slate for global tee
+	_ = tee.Close()
+	tee.SetVerbose(false)
+
+	dir := t.TempDir()
+	logPath := filepath.Join(dir, "glog.txt")
+
+	if err := tee.Open(logPath); err != nil {
+		t.Fatalf("Open: %v", err)
+	}
+
+	out := captureStdout(t, func() {
+		if _, err := tee.Printf("A"); err != nil {
+			t.Fatalf("Printf: %v", err)
+		}
+		// Not verbose yet, should not print
+		if n, err := tee.VPrintf("B"); err != nil || n != 0 {
+			t.Fatalf("VPrintf (quiet) n=%d err=%v", n, err)
+		}
+		tee.SetVerbose(true)
+		if _, err := tee.VPrintf("C%d", 1); err != nil {
+			t.Fatalf("VPrintf (verbose): %v", err)
+		}
+	})
+
+	if out != "AC1" {
+		t.Fatalf("stdout = %q, want %q", out, "AC1")
+	}
+
+	if err := tee.Close(); err != nil {
+		t.Fatalf("Close: %v", err)
+	}
+
+	data, err := os.ReadFile(logPath)
+	if err != nil {
+		t.Fatalf("ReadFile: %v", err)
+	}
+	if string(data) != "AC1" {
+		t.Fatalf("file content = %q, want %q", string(data), "AC1")
+	}
+
+	// Reset global tee for other tests/packages
+	_ = tee.Close()
+	tee.SetVerbose(false)
+}
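One caveat about the captureStdout helper in this new test file: it only reads the pipe after fn returns, so a test body that wrote more than the OS pipe buffer (commonly 64 KiB on Linux) would block inside fn. The outputs exercised here are tiny, so that is not a problem; for larger outputs a variant that drains the pipe concurrently would be needed. A sketch of such a variant, assuming the same imports (io, os, testing) as the file above; the name is illustrative:

// captureStdoutStreaming drains the read end of the pipe in a goroutine while
// fn runs, so even large writes to os.Stdout cannot fill the pipe and block.
func captureStdoutStreaming(t *testing.T, fn func()) string {
	t.Helper()
	r, w, err := os.Pipe()
	if err != nil {
		t.Fatalf("pipe: %v", err)
	}
	old := os.Stdout
	os.Stdout = w
	defer func() { os.Stdout = old }()

	done := make(chan string, 1)
	go func() {
		b, _ := io.ReadAll(r) // keeps reading while fn writes
		done <- string(b)
	}()

	fn()
	_ = w.Close() // signal EOF to the reader goroutine
	out := <-done
	_ = r.Close()
	return out
}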
@@ -169,6 +169,26 @@ type BufCloser struct {
 	buf *bytes.Buffer
 }

+// NewBufCloser creates and initializes a new BufCloser using buf as
+// its initial contents. It is intended to prepare a BufCloser to read
+// existing data. It can also be used to size the internal buffer for
+// writing. To do that, buf should have the desired capacity but a
+// length of zero.
+func NewBufCloser(buf []byte) *BufCloser {
+	bc := new(BufCloser)
+	bc.buf = bytes.NewBuffer(buf)
+	return bc
+}
+
+// NewBufCloserString creates and initializes a new Buffer using
+// string s as its initial contents. It is intended to prepare a
+// buffer to read an existing string.
+func NewBufCloserString(s string) *BufCloser {
+	buf := new(BufCloser)
+	buf.buf = bytes.NewBufferString(s)
+	return buf
+}
+
 // Write writes the data to the BufCloser.
 func (buf *BufCloser) Write(p []byte) (int, error) {
 	return buf.buf.Write(p)
@@ -199,26 +219,6 @@ func (buf *BufCloser) Len() int {
 	return buf.buf.Len()
 }

-// NewBufCloser creates and initializes a new BufCloser using buf as
-// its initial contents. It is intended to prepare a BufCloser to read
-// existing data. It can also be used to size the internal buffer for
-// writing. To do that, buf should have the desired capacity but a
-// length of zero.
-func NewBufCloser(buf []byte) *BufCloser {
-	bc := new(BufCloser)
-	bc.buf = bytes.NewBuffer(buf)
-	return bc
-}
-
-// NewBufCloserString creates and initializes a new Buffer using
-// string s as its initial contents. It is intended to prepare a
-// buffer to read an existing string.
-func NewBufCloserString(s string) *BufCloser {
-	buf := new(BufCloser)
-	buf.buf = bytes.NewBufferString(s)
-	return buf
-}
-
 // A LoggingBuffer is an io.ReadWriter that prints the hex value of
 // the data for all reads and writes.
 type LoggingBuffer struct {
@@ -323,6 +323,26 @@ type BrokenCloser struct {
 	buf *bytes.Buffer
 }

+// NewBrokenCloser creates and initializes a new BrokenCloser using buf as
+// its initial contents. It is intended to prepare a BrokenCloser to read
+// existing data. It can also be used to size the internal buffer for
+// writing. To do that, buf should have the desired capacity but a
+// length of zero.
+func NewBrokenCloser(buf []byte) *BrokenCloser {
+	bc := new(BrokenCloser)
+	bc.buf = bytes.NewBuffer(buf)
+	return bc
+}
+
+// NewBrokenCloserString creates and initializes a new Buffer using
+// string s as its initial contents. It is intended to prepare a
+// buffer to read an existing string.
+func NewBrokenCloserString(s string) *BrokenCloser {
+	buf := new(BrokenCloser)
+	buf.buf = bytes.NewBufferString(s)
+	return buf
+}
+
 // Write writes the data to the BrokenCloser.
 func (buf *BrokenCloser) Write(p []byte) (int, error) {
 	return buf.buf.Write(p)
@@ -347,23 +367,3 @@ func (buf *BrokenCloser) Reset() {
 func (buf *BrokenCloser) Bytes() []byte {
 	return buf.buf.Bytes()
 }
-
-// NewBrokenCloser creates and initializes a new BrokenCloser using buf as
-// its initial contents. It is intended to prepare a BrokenCloser to read
-// existing data. It can also be used to size the internal buffer for
-// writing. To do that, buf should have the desired capacity but a
-// length of zero.
-func NewBrokenCloser(buf []byte) *BrokenCloser {
-	bc := new(BrokenCloser)
-	bc.buf = bytes.NewBuffer(buf)
-	return bc
-}
-
-// NewBrokenCloserString creates and initializes a new Buffer using
-// string s as its initial contents. It is intended to prepare a
-// buffer to read an existing string.
-func NewBrokenCloserString(s string) *BrokenCloser {
-	buf := new(BrokenCloser)
-	buf.buf = bytes.NewBufferString(s)
-	return buf
-}
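The four hunks above are pure moves: NewBufCloser, NewBufCloserString, NewBrokenCloser and NewBrokenCloserString are unchanged, they now just sit directly after their type declarations instead of after the methods. For context, a brief hedged sketch of how these testio helpers are typically used; the test function is illustrative and not part of the repository:

package example_test

import (
	"testing"

	"git.wntrmute.dev/kyle/goutils/testio"
)

// BufCloser wraps a bytes.Buffer behind Read/Write/Close, so it can stand in
// for an io.ReadWriteCloser (a file, a connection) in unit tests.
func TestReadGreeting(t *testing.T) {
	rwc := testio.NewBufCloserString("hello")
	defer func() { _ = rwc.Close() }()

	p := make([]byte, 5)
	if _, err := rwc.Read(p); err != nil {
		t.Fatalf("read: %v", err)
	}
	if string(p) != "hello" {
		t.Fatalf("got %q, want %q", p, "hello")
	}
}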
@@ -1,13 +1,15 @@
-package testio
+package testio_test

 import (
 	"bytes"
 	"os"
 	"testing"
+
+	"git.wntrmute.dev/kyle/goutils/testio"
 )

 func TestBrokenWriter(t *testing.T) {
-	buf := NewBrokenWriter(2)
+	buf := testio.NewBrokenWriter(2)
 	data := []byte{1, 2}

 	n, err := buf.Write(data)
@@ -39,7 +41,7 @@ func TestBufCloser(t *testing.T) {
 	var data = []byte{1, 2}
 	var read = make([]byte, 2)

-	buf := NewBufCloser(data)
+	buf := testio.NewBufCloser(data)
 	_, err := buf.Read(read)
 	if err != nil {
 		t.Fatalf("%v", err)
@@ -54,7 +56,7 @@ func TestBufCloser(t *testing.T) {
 	buf.Reset()

 	s := "hi"
-	buf = NewBufCloserString(s)
+	buf = testio.NewBufCloserString(s)

 	read = buf.Bytes()
 	if string(read) != s {
@@ -65,7 +67,7 @@ func TestBufCloser(t *testing.T) {
 func TestLoggingBuffer(t *testing.T) {
 	src := &bytes.Buffer{}
 	data := []byte("AB")
-	lb := NewLoggingBuffer(src)
+	lb := testio.NewLoggingBuffer(src)
 	_, err := lb.Write(data)
 	if err != nil {
 		t.Fatalf("%v", err)
@@ -82,8 +84,8 @@ func TestLoggingBuffer(t *testing.T) {
 	}

 	expected := "[TEST] [WRITE] 4142\n"
-	if string(out.Bytes()) != expected {
-		t.Fatalf("expected '%s', have '%s'", expected, string(out.Bytes()))
+	if out.String() != expected {
+		t.Fatalf("expected '%s', have '%s'", expected, out.String())
 	}

 	out.Reset()
@@ -96,8 +98,8 @@ func TestLoggingBuffer(t *testing.T) {
 	}

 	expected = "[TEST] [READ] 4142\n"
-	if string(out.Bytes()) != expected {
-		t.Fatalf("expected '%s', have '%s'", expected, string(out.Bytes()))
+	if out.String() != expected {
+		t.Fatalf("expected '%s', have '%s'", expected, out.String())
 	}

 	out.Reset()
@@ -112,8 +114,8 @@ func TestLoggingBuffer(t *testing.T) {
 	}

 	expected = "[READ] 4142\n"
-	if string(out.Bytes()) != expected {
-		t.Fatalf("expected '%s', have '%s'", expected, string(out.Bytes()))
+	if out.String() != expected {
+		t.Fatalf("expected '%s', have '%s'", expected, out.String())
 	}

 	src.Reset()
@@ -124,8 +126,8 @@ func TestLoggingBuffer(t *testing.T) {
 }

 func TestBrokenReadWriter(t *testing.T) {
-	brw := NewBrokenReadWriter(0, 0)
-	lb := NewLoggingBuffer(brw)
+	brw := testio.NewBrokenReadWriter(0, 0)
+	lb := testio.NewLoggingBuffer(brw)

 	var p = make([]byte, 2)
 	var data = []byte("HI")
@@ -177,7 +179,7 @@ func TestBrokenReadWriter(t *testing.T) {
 }

 func TestBufferConn(t *testing.T) {
-	bc := NewBufferConn()
+	bc := testio.NewBufferConn()

 	client := []byte("AB")
 	peer := []byte("XY")
@@ -1,16 +0,0 @@
-package testutil
-
-import "io/ioutil"
-
-// TempName generates a new temporary file name. The caller should
-// remove the temporary file when done.
-func TempName() (string, error) {
-	tmpf, err := ioutil.TempFile("", "transport_cachedkp_")
-	if err != nil {
-		return "", err
-	}
-
-	name := tmpf.Name()
-	tmpf.Close()
-	return name, nil
-}
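The deleted testutil package imported io/ioutil, which has been deprecated since Go 1.16 in favour of the os and io packages. Anyone who still needs this behaviour can inline it with os.CreateTemp; a minimal sketch follows (the lowercase name is illustrative, and an os import is assumed):

// tempName mirrors the removed testutil.TempName, but uses os.CreateTemp
// instead of the deprecated ioutil.TempFile. The caller must still remove
// the temporary file when done with it.
func tempName() (string, error) {
	tmpf, err := os.CreateTemp("", "transport_cachedkp_")
	if err != nil {
		return "", err
	}

	name := tmpf.Name()
	if err := tmpf.Close(); err != nil {
		return "", err
	}
	return name, nil
}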