Compare commits

86 commits

| SHA1 |
|---|
| 8ca8538268 |
| 155c49cc5e |
| dda9fd9f07 |
| c251c1e1b5 |
| 6eb533f79b |
| ea5ffa4828 |
| aa96e47112 |
| d34a417dce |
| d11e0cf9f9 |
| aad7d68599 |
| 4560868688 |
| 8d5406256f |
| 9280e846fa |
| 0a71661901 |
| 804f53d27d |
| cfb80355bb |
| 77160395a0 |
| 37d5e04421 |
| dc54eeacbc |
| e2a3081ce5 |
| 3149d958f4 |
| f296344acf |
| 3fb2d88a3f |
| 150c02b377 |
| 83f88c49fe |
| 7c437ac45f |
| c999bf35b0 |
| 4dc135cfe0 |
| 790113e189 |
| 8348c5fd65 |
| 1eafb638a8 |
| 3ad562b6fa |
| 0f77bd49dc |
| f31d74243f |
| a573f1cd20 |
| f93cf5fa9c |
| b879d62384 |
| c99ffd4394 |
| ed8c07c1c5 |
| cf2b016433 |
| 2899885c42 |
| f3b4838cf6 |
| 8ed30e9960 |
| c7de3919b0 |
| 840066004a |
| 9fb93a3802 |
| ecc7e5ab1e |
| a934c42aa1 |
| 948986ba60 |
| 3be86573aa |
| e3a6355edb |
| 66d16acebc |
| fdff2e0afe |
| 0dcd18c6f1 |
| 024d552293 |
| 9cd2ced695 |
| b92e16fa4d |
| 6fbdece4be |
| 619c08a13f |
| 944a57bf0e |
| 0857b29624 |
| e95404bfc5 |
| 924654e7c4 |
| 9e0979e07f |
| bbc82ff8de |
| 5fd928f69a |
| acefe4a3b9 |
| a1452cebc9 |
| 6e9812e6f5 |
| 8c34415c34 |
| 2cf2c15def |
| eaad1884d4 |
| 5d57d844d4 |
| 31b9d175dd |
| 79e106da2e |
| 939b1bc272 |
| 89e74f390b |
| 7881b6fdfc |
| 5bef33245f |
| 84250b0501 |
| 459e9f880f |
| 0982f47ce3 |
| 1dec15fd11 |
| 2ee9cae5ba |
| dc04475120 |
| dbbd5116b5 |
```diff
@@ -64,4 +64,4 @@ workflows:
   testbuild:
     jobs:
       - testbuild
-#      - lint
+      - lint
```
.github/workflows/release.yml (vendored, new file, 35 lines)

```yaml
name: Release

on:
  push:
    tags:
      - 'v*'
  workflow_dispatch: {}

permissions:
  contents: write

jobs:
  goreleaser:
    name: GoReleaser
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          cache: true

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.gitignore (vendored, 4 lines changed)

```diff
@@ -1 +1,5 @@
 .idea
+cmd/cert-bundler/testdata/pkg/*
+# Added by goreleaser init:
+dist/
+cmd/cert-bundler/testdata/bundle/
```
```
@@ -12,12 +12,31 @@

version: "2"

output:
  sort-order:
    - file
    - linter
    - severity

issues:
  # Maximum count of issues with the same text.
  # Set to 0 to disable.
  # Default: 3
  max-same-issues: 50

  # Exclude some lints for CLI programs under cmd/ (package main).
  # The project allows fmt.Print* in command-line tools; keep forbidigo for libraries.
  exclude-rules:
    - path: ^cmd/
      linters:
        - forbidigo
    - path: cmd/.*
      linters:
        - forbidigo
    - path: .*/cmd/.*
      linters:
        - forbidigo

formatters:
  enable:
    - goimports # checks if the code and import statements are formatted according to the 'goimports' command

@@ -73,7 +92,6 @@ linters:
    - godoclint # checks Golang's documentation practice
    - godot # checks if comments end in a period
    - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod
    - goprintffuncname # checks that printf-like functions are named with f at the end
    - gosec # inspects source code for security problems
    - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
    - iface # checks the incorrect use of interfaces, helping developers avoid interface pollution

@@ -228,6 +246,12 @@ linters:
      # Such cases aren't reported by default.
      # Default: false
      check-type-assertions: true
      exclude-functions:
        - (*git.wntrmute.dev/kyle/goutils/sbuf.Buffer).Write
        - git.wntrmute.dev/kyle/goutils/lib.Warn
        - git.wntrmute.dev/kyle/goutils/lib.Warnx
        - git.wntrmute.dev/kyle/goutils/lib.Err
        - git.wntrmute.dev/kyle/goutils/lib.Errx

    exhaustive:
      # Program elements to check for exhaustiveness.

@@ -319,6 +343,12 @@ linters:
      # https://github.com/godoc-lint/godoc-lint?tab=readme-ov-file#no-unused-link
      - no-unused-link

    gosec:
      excludes:
        - G104 # handled by errcheck
        - G301
        - G306

    govet:
      # Enable all analyzers.
      # Default: false

@@ -341,11 +371,6 @@ linters:
      skip-single-param: true

    mnd:
      # List of function patterns to exclude from analysis.
      # Values always ignored: `time.Date`,
      # `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`,
      # `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`.
      # Default: []
      ignored-functions:
        - args.Error
        - flag.Arg

@@ -359,6 +384,15 @@ linters:
        - os.WriteFile
        - prometheus.ExponentialBuckets.*
        - prometheus.LinearBuckets
      ignored-numbers:
        - 1
        - 2
        - 3
        - 4
        - 8
        - 24
        - 30
        - 365

    nakedret:
      # Make an issue if func has more lines of code than this setting, and it has naked returns.

@@ -427,6 +461,10 @@ linters:
        # Omit embedded fields from selector expression.
        # https://staticcheck.dev/docs/checks/#QF1008
        - -QF1008
        # We often explicitly enable old/deprecated ciphers for research.
        - -SA1019
        # Covered by revive.
        - -ST1003

    usetesting:
      # Enable/disable `os.TempDir()` detections.

@@ -445,10 +483,20 @@ linters:
    rules:
      - path: 'ahash/ahash.go'
        linters: [ staticcheck, gosec ]
      - path: 'twofactor/.*.go'
        linters: [ exhaustive, mnd, revive ]
      - path: 'backoff/backoff_test.go'
        linters: [ testpackage ]
      - path: 'dbg/dbg_test.go'
        linters: [ testpackage ]
      - path: 'log/logger.go'
        linters: [ forbidigo ]
      - path: 'logging/example_test.go'
        linters: [ testableexamples ]
      - path: 'main.go'
        linters: [ forbidigo, mnd, reassign ]
      - path: 'cmd/cruntar/main.go'
        linters: [ unparam ]
      - source: 'TODO'
        linters: [ godot ]
      - text: 'should have a package comment'

@@ -470,4 +518,5 @@ linters:
      - goconst
      - gosec
      - noctx
      - reassign
      - wrapcheck
```
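The `check-type-assertions: true` setting in the errcheck block above makes unchecked, single-value type assertions reportable. A brief Go illustration of the two forms; this is not taken from the repository, and the `describe` function is hypothetical:

```go
package main

import "fmt"

func describe(v any) string {
	// Single-value assertion: panics if v is not a string, and is the form
	// errcheck reports when check-type-assertions is enabled.
	//   s := v.(string)

	// Two-value assertion: the failure case is handled explicitly.
	s, ok := v.(string)
	if !ok {
		return fmt.Sprintf("not a string: %T", v)
	}
	return "string: " + s
}

func main() {
	fmt.Println(describe("hello"))
	fmt.Println(describe(42))
}
```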
.goreleaser.yaml (new file, 456 lines)

```yaml
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com

# The lines below are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/need to use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj

version: 2

before:
  hooks:
    # You may remove this if you don't use go modules.
    - go mod tidy
    # you may remove this if you don't need go generate
    - go generate ./...

builds:
  - id: atping
    main: ./cmd/atping/main.go
    binary: atping
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: ca-signed
    main: ./cmd/ca-signed/main.go
    binary: ca-signed
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: cert-bundler
    main: ./cmd/cert-bundler/main.go
    binary: cert-bundler
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: cert-revcheck
    main: ./cmd/cert-revcheck/main.go
    binary: cert-revcheck
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certchain
    main: ./cmd/certchain/main.go
    binary: certchain
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certdump
    main: ./cmd/certdump/main.go
    binary: certdump
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certexpiry
    main: ./cmd/certexpiry/main.go
    binary: certexpiry
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certser
    main: ./cmd/certser/main.go
    binary: certser
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certverify
    main: ./cmd/certverify/main.go
    binary: certverify
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: clustersh
    main: ./cmd/clustersh/main.go
    binary: clustersh
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: cruntar
    main: ./cmd/cruntar/main.go
    binary: cruntar
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: csrpubdump
    main: ./cmd/csrpubdump/main.go
    binary: csrpubdump
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: data_sync
    main: ./cmd/data_sync/main.go
    binary: data_sync
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: diskimg
    main: ./cmd/diskimg/main.go
    binary: diskimg
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: dumpbytes
    main: ./cmd/dumpbytes/main.go
    binary: dumpbytes
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: eig
    main: ./cmd/eig/main.go
    binary: eig
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: fragment
    main: ./cmd/fragment/main.go
    binary: fragment
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: host
    main: ./cmd/host/main.go
    binary: host
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: jlp
    main: ./cmd/jlp/main.go
    binary: jlp
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: kgz
    main: ./cmd/kgz/main.go
    binary: kgz
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: minmax
    main: ./cmd/minmax/main.go
    binary: minmax
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: parts
    main: ./cmd/parts/main.go
    binary: parts
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: pem2bin
    main: ./cmd/pem2bin/main.go
    binary: pem2bin
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: pembody
    main: ./cmd/pembody/main.go
    binary: pembody
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: pemit
    main: ./cmd/pemit/main.go
    binary: pemit
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: readchain
    main: ./cmd/readchain/main.go
    binary: readchain
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: renfnv
    main: ./cmd/renfnv/main.go
    binary: renfnv
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: rhash
    main: ./cmd/rhash/main.go
    binary: rhash
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: rolldie
    main: ./cmd/rolldie/main.go
    binary: rolldie
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: showimp
    main: ./cmd/showimp/main.go
    binary: showimp
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: ski
    main: ./cmd/ski/main.go
    binary: ski
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: sprox
    main: ./cmd/sprox/main.go
    binary: sprox
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: stealchain
    main: ./cmd/stealchain/main.go
    binary: stealchain
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: stealchain-server
    main: ./cmd/stealchain-server/main.go
    binary: stealchain-server
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: subjhash
    main: ./cmd/subjhash/main.go
    binary: subjhash
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: tlsinfo
    main: ./cmd/tlsinfo/main.go
    binary: tlsinfo
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: tlskeypair
    main: ./cmd/tlskeypair/main.go
    binary: tlskeypair
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: utc
    main: ./cmd/utc/main.go
    binary: utc
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: yamll
    main: ./cmd/yamll/main.go
    binary: yamll
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: zsearch
    main: ./cmd/zsearch/main.go
    binary: zsearch
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64

archives:
  - formats: [tar.gz]
    # this name template makes the OS and Arch compatible with the results of `uname`.
    name_template: >-
      {{ .ProjectName }}_
      {{- title .Os }}_
      {{- if eq .Arch "amd64" }}x86_64
      {{- else if eq .Arch "386" }}i386
      {{- else }}{{ .Arch }}{{ end }}
      {{- if .Arm }}v{{ .Arm }}{{ end }}
    # use zip for windows archives
    format_overrides:
      - goos: windows
        formats: [zip]

changelog:
  sort: asc
  filters:
    exclude:
      - "^docs:"
      - "^test:"

gitea_urls:
  api: https://git.wntrmute.dev/api/v1
  download: https://git.wntrmute.dev/
  # set to true if you use a self-signed certificate
  skip_tls_verify: false

release:
  github:
    owner: kyle
    name: goutils
  footer: >-

    ---

    Released by [GoReleaser](https://github.com/goreleaser/goreleaser).
```
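The archive `name_template` above rewrites Go's architecture names into `uname`-style names before composing the archive name. The Go sketch below is not part of the repository; it only mirrors the mapping the template encodes, and it assumes `goutils` as the project name since one is not set explicitly in the configuration.

```go
package main

import (
	"fmt"
	"strings"
)

// archiveName mirrors the GoReleaser name_template: the OS is title-cased
// and the architecture is rewritten to uname-compatible names.
func archiveName(project, goos, goarch, goarm string) string {
	arch := goarch
	switch goarch {
	case "amd64":
		arch = "x86_64"
	case "386":
		arch = "i386"
	}

	name := project + "_" + strings.ToUpper(goos[:1]) + goos[1:] + "_" + arch
	if goarm != "" {
		name += "v" + goarm
	}
	return name
}

func main() {
	fmt.Println(archiveName("goutils", "linux", "amd64", ""))  // goutils_Linux_x86_64
	fmt.Println(archiveName("goutils", "darwin", "arm64", "")) // goutils_Darwin_arm64
}
```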
CHANGELOG (154 lines changed)

```diff
@@ -1,27 +1,145 @@
-Release 1.2.1 - 2018-09-15
-
-+ Add missing format argument to Errorf call in kgz.
-
-Release 1.2.0 - 2018-09-15
-
-+ Adds the kgz command line utility.
-
-Release 1.1.0 - 2017-11-16
-
-+ A number of new command line utilities were added
-+ atping
-+ cruntar
-+ renfnv
-+
-+ ski
-+ subjhash
-+ yamll
-+ new package: ahash
-+   package for loading hashes from an algorithm string
-+ new certificate loading functions in the lib package
-+ new package: tee
-+   emulates tee(1)
+CHANGELOG
+
+v1.13.5 - 2025-11-18
+
+Changed:
+- build: updating goreleaser config.
+
+v1.13.4 - 2025-11-18
+
+Changed:
+- build: updating goreleaser config.
+
+v1.13.3 - 2025-11-18
+
+Added:
+- certlib: introduce `Fetcher` for retrieving certificates.
+- lib: `HexEncode` gains a byte-slice output variant.
+- build: add GoReleaser configuration.
+
+Changed:
+- cmd: migrate programs to use `certlib.Fetcher` for certificate retrieval
+  (includes `certdump`, `ski`, and others).
+- cmd/ski: update display mode.
+
+Misc:
+- repository fixups and small cleanups.
+
+v1.13.2 - 2025-11-17
+
+Add:
+- certlib/bundler: refactor certificate bundling from cmd/cert-bundler
+  into a separate package.
+
+Changed:
+- cmd/cert-bundler: refactor to use bundler package, and update Dockerfile.
+
+v1.13.1 - 2025-11-17
+
+Add:
+- Dockerfile for cert-bundler.
+
+v1.13.0 - 2025-11-16
+
+Add:
+- cmd/certser: print serial numbers for certificates.
+- lib/HexEncode: add a new hex encode function handling multiple output
+  formats, including with and without colons.
+
+v1.12.4 - 2025-11-16
+
+Changed:
+
+- Linting fixes for twofactor that were previously masked.
+
+v1.12.3 erroneously tagged and pushed
+
+v1.12.2 - 2025-11-16
+
+Changed:
+
+- add rsc.io/qr dependency for twofactor.
+
+v1.12.1 - 2025-11-16
+
+Changed:
+- twofactor: Remove go.{mod,sum}.
+
+v1.12.0 - 2025-11-16
+
+Added
+- twofactor: the github.com/kisom/twofactor repo has been subtree'd
+  into this repo.
+
+v1.11.2 - 2025-11-16
+
+Changed
+- cmd/ski, cmd/csrpubdump, cmd/tlskeypair: centralize
+  certificate/private-key/CSR parsing by reusing certlib helpers.
+  This reduces duplication and improves consistency across commands.
+- csr: CSR parsing in the above commands now uses certlib.ParseCSR,
+  which verifies CSR signatures (behavioral hardening compared to
+  prior parsing without signature verification).
+
+v1.11.1 - 2025-11-16
+
+Changed
+- cmd: complete linting fixes across programs; no functional changes.
+
+v1.11.0 - 2025-11-15
+
+Added
+- cache/mru: introduce MRU cache implementation with timestamp utilities.
+
+Changed
+- certlib: complete overhaul to simplify APIs and internals.
+- repo: widespread linting cleanups across many packages (config, dbg, die,
+  fileutil, log/logging, mwc, sbuf, seekbuf, tee, testio, etc.).
+- cmd: general program cleanups; `cert-bundler` lint fixes.
+
+Removed
+- rand: remove unused package.
+- testutil: remove unused code.
+
+
+v1.10.1 — 2025-11-15
+
+Changed
+- certlib: major overhaul and refactor.
+- repo: linter autofixes ahead of release.
+
+
+v1.10.0 — 2025-11-14
+
+Added
+- cmd: add `cert-revcheck` command.
+
+Changed
+- ci/lint: add golangci-lint stage and initial cleanup.
+
+
+v1.9.1 — 2025-11-15
+
+Fixed
+- die: correct calls to `die.With`.
+
+
+v1.9.0 — 2025-11-14
+
+Added
+- cmd: add `cert-bundler` tool.
+
+Changed
+- misc: minor updates and maintenance.
+
+
+v1.8.1 — 2025-11-14
+
+Added
+- cmd: add `tlsinfo` tool.
+
+
+v1.8.0 — 2025-11-14
+
+Baseline
+- Initial baseline for this changelog series.
```
LICENSE (197 lines changed)

```diff
@@ -1,19 +1,194 @@
-Copyright (c) 2015-2023 Kyle Isom <kyle@tyrfingr.is>
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+Copyright 2025 K. Isom <kyle@imap.cc>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+                          Apache License
+                    Version 2.0, January 2004
+                 http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+=======================================================================
+
+The backoff package (written during my time at Cloudflare) is released
+under the following license:
```
README.md (85 lines changed)

````diff
@@ -2,39 +2,52 @@ GOUTILS
 
 This is a collection of small utility code I've written in Go; the `cmd/`
 directory has a number of command-line utilities. Rather than keep all
-of these in superfluous repositories of their own, or rewriting them
+of these in superfluous repositories of their own or rewriting them
 for each project, I'm putting them here.
 
-The project can be built with the standard Go tooling, or it can be built
-with Bazel.
+The project can be built with the standard Go tooling.
 
 Contents:
 
+    ahash/          Provides hashes from string algorithm specifiers.
     assert/         Error handling, assertion-style.
     backoff/        Implementation of an intelligent backoff strategy.
+    cache/          Implementations of various caches.
+        lru/        Least-recently-used cache.
+        mru/        Most-recently-used cache.
+    certlib/        Library for working with TLS certificates.
     cmd/
         atping/     Automated TCP ping, meant for putting in cronjobs.
-        certchain/  Display the certificate chain from a
-                    TLS connection.
+        ca-signed/  Validate whether a certificate is signed by a CA.
+        cert-bundler/
+                    Create certificate bundles from a source of PEM
+                    certificates.
+        cert-revcheck/
+                    Check whether a certificate has been revoked or is
+                    expired.
+        certchain/  Display the certificate chain from a TLS connection.
         certdump/   Dump certificate information.
         certexpiry/ Print a list of certificate subjects and expiry times
                     or warn about certificates expiring within a certain
                     window.
-        certverify/ Verify a TLS X.509 certificate, optionally printing
+        certverify/ Verify a TLS X.509 certificate file, optionally printing
                     the time to expiry and checking for revocations.
         clustersh/  Run commands or transfer files across multiple
                     servers via SSH.
-        cruntar/    Untar an archive with hard links, copying instead of
+        cruntar/    (Un)tar an archive with hard links, copying instead of
                     linking.
         csrpubdump/ Dump the public key from an X.509 certificate request.
         data_sync/  Sync the user's homedir to external storage.
         diskimg/    Write a disk image to a device.
         dumpbytes/  Dump the contents of a file as hex bytes, printing it as
                     a Go []byte literal.
         eig/        EEPROM image generator.
         fragment/   Print a fragment of a file.
         host/       Go imlpementation of the host(1) command.
         jlp/        JSON linter/prettifier.
         kgz/        Custom gzip compressor / decompressor that handles 99%
                     of my use cases.
         minmax/     Generate a minmax code for use in uLisp.
         parts/      Simple parts database management for my collection of
                     electronic components.
         pem2bin/    Dump the binary body of a PEM-encoded block.
@@ -44,37 +57,79 @@ Contents:
                     in a bundle.
         renfnv/     Rename a file to base32-encoded 64-bit FNV-1a hash.
         rhash/      Compute the digest of remote files.
         rolldie/    Roll some dice.
         showimp/    List the external (e.g. non-stdlib and outside the
                     current working directory) imports for a Go file.
         ski         Display the SKI for PEM-encoded TLS material.
         sprox/      Simple TCP proxy.
-        stealchain/ Dump the verified chain from a TLS
-                    connection to a server.
-        stealchain- Dump the verified chain from a TLS
-        server/     connection from a client.
+        stealchain/ Dump the verified chain from a TLS connection to a
+                    server.
+        stealchain-server/
+                    Dump the verified chain from a TLS connection from
+                    from a client.
         subjhash/   Print or match subject info from a certificate.
         tlsinfo/    Print information about a TLS connection (the TLS version
                     and cipher suite).
         tlskeypair/ Check whether a TLS certificate and key file match.
         utc/        Convert times to UTC.
         yamll/      A small YAML linter.
         zsearch/    Search for a string in directory of gzipped files.
+    config/         A simple global configuration system where configuration
+                    data is pulled from a file or an environment variable
+                    transparently.
+        iniconf/    A simple INI-style configuration system.
     dbg/            A debug printer.
     die/            Death of a program.
     fileutil/       Common file functions.
     lib/            Commonly-useful functions for writing Go programs.
     log/            A syslog library.
     logging/        A logging library.
     mwc/            MultiwriteCloser implementation.
     rand/           Utilities for working with math/rand.
     sbuf/           A byte buffer that can be wiped.
     seekbuf/        A read-seekable byte buffer.
     syslog/         Syslog-type logging.
     tee/            Emulate tee(1)'s functionality in io.Writers.
     testio/         Various I/O utilities useful during testing.
     testutil/       Various utility functions useful during testing.
+    twofactor/      Two-factor authentication.
 
 Each program should have a small README in the directory with more
 information.
 
-All code here is licensed under the ISC license.
+All code here is licensed under the Apache 2.0 license.
+
+Error handling
+--------------
+
+This repo standardizes on Go 1.13+ error wrapping and matching. Libraries and
+CLIs should:
+
+- Wrap causes with context using `fmt.Errorf("context: %w", err)`.
+- Use typed, structured errors from `certlib/certerr` for certificate-related
+  operations. These include a typed `*certerr.Error` with `Source` and `Kind`.
+- Match errors programmatically:
+  - `errors.Is(err, certerr.ErrEncryptedPrivateKey)` to detect sentinel states.
+  - `errors.As(err, &e)` (where `var e *certerr.Error`) to inspect
+    `e.Source`/`e.Kind`.
+
+Examples:
+
+```
+cert, err := certlib.LoadCertificate(path)
+if err != nil {
+	// sentinel match:
+	if errors.Is(err, certerr.ErrEmptyCertificate) {
+		// handle empty input
+	}
+
+	// typed error match
+	var ce *certerr.Error
+	if errors.As(err, &ce) {
+		switch ce.Kind {
+		case certerr.KindParse:
+			// parse error handling
+		case certerr.KindLoad:
+			// file loading error handling
+		}
+	}
+}
+```
````
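To round out the matching examples in the README's new error-handling section with the wrapping side of the convention, here is a minimal sketch using only the standard library; the `loadBundle` helper and its path are hypothetical and exist only for illustration.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// loadBundle wraps the underlying cause with context using %w, so callers
// can still match the original error programmatically.
func loadBundle(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("reading certificate bundle %q: %w", path, err)
	}
	return data, nil
}

func main() {
	if _, err := loadBundle("testdata/bundle.pem"); err != nil {
		// The wrapped cause is still matchable with errors.Is.
		if errors.Is(err, os.ErrNotExist) {
			fmt.Println("bundle is missing:", err)
			return
		}
		fmt.Println("unexpected error:", err)
	}
}
```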
```diff
@@ -91,7 +91,7 @@ func TestReset(t *testing.T) {
 	}
 }
 
-const decay = 5 * time.Millisecond
+const decay = time.Second
 const maxDuration = 10 * time.Millisecond
 const interval = time.Millisecond
```
cache/lru/lru.go (vendored, new file, 179 lines)

```go
// Package lru implements a Least Recently Used cache.
package lru

import (
	"errors"
	"fmt"
	"sort"
	"sync"

	"github.com/benbjohnson/clock"
)

type item[V any] struct {
	V      V
	access int64
}

// A Cache is a map that retains a limited number of items. It must be
// initialized with New, providing a maximum capacity for the cache.
// Only the least recently used items are retained.
type Cache[K comparable, V any] struct {
	store  map[K]*item[V]
	access *timestamps[K]
	cap    int
	clock  clock.Clock
	// All public methods that have the possibility of modifying the
	// cache should lock it.
	mtx *sync.Mutex
}

// New must be used to create a new Cache.
func New[K comparable, V any](icap int) *Cache[K, V] {
	return &Cache[K, V]{
		store:  map[K]*item[V]{},
		access: newTimestamps[K](icap),
		cap:    icap,
		clock:  clock.New(),
		mtx:    &sync.Mutex{},
	}
}

// StringKeyCache is a convenience wrapper for cache keyed by string.
type StringKeyCache[V any] struct {
	*Cache[string, V]
}

// NewStringKeyCache creates a new LRU cache keyed by string.
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
	return &StringKeyCache[V]{Cache: New[string, V](icap)}
}

func (c *Cache[K, V]) lock() {
	c.mtx.Lock()
}

func (c *Cache[K, V]) unlock() {
	c.mtx.Unlock()
}

// Len returns the number of items currently in the cache.
func (c *Cache[K, V]) Len() int {
	return len(c.store)
}

// evict should remove the least-recently-used cache item.
func (c *Cache[K, V]) evict() {
	if c.access.Len() == 0 {
		return
	}

	k := c.access.K(0)
	c.evictKey(k)
}

// evictKey should remove the entry given by the key item.
func (c *Cache[K, V]) evictKey(k K) {
	delete(c.store, k)
	i, ok := c.access.Find(k)
	if !ok {
		return
	}

	c.access.Delete(i)
}

func (c *Cache[K, V]) sanityCheck() {
	if len(c.store) != c.access.Len() {
		panic(fmt.Sprintf("LRU cache is out of sync; store len = %d, access len = %d",
			len(c.store), c.access.Len()))
	}
}

// ConsistencyCheck runs a series of checks to ensure that the cache's
// data structures are consistent. It is not normally required, and it
// is primarily used in testing.
func (c *Cache[K, V]) ConsistencyCheck() error {
	c.lock()
	defer c.unlock()
	if err := c.access.ConsistencyCheck(); err != nil {
		return err
	}

	if len(c.store) != c.access.Len() {
		return fmt.Errorf("lru: cache is out of sync; store len = %d, access len = %d",
			len(c.store), c.access.Len())
	}

	for i := range c.access.ts {
		itm, ok := c.store[c.access.K(i)]
		if !ok {
			return errors.New("lru: key in access is not in store")
		}

		if c.access.T(i) != itm.access {
			return fmt.Errorf("timestamps are out of sync (%d != %d)",
				itm.access, c.access.T(i))
		}
	}

	if !sort.IsSorted(c.access) {
		return errors.New("lru: timestamps aren't sorted")
	}

	return nil
}

// Store adds the value v to the cache under the k.
func (c *Cache[K, V]) Store(k K, v V) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	if len(c.store) == c.cap {
		c.evict()
	}

	if _, ok := c.store[k]; ok {
		c.evictKey(k)
	}

	itm := &item[V]{
		V:      v,
		access: c.clock.Now().UnixNano(),
	}

	c.store[k] = itm
	c.access.Update(k, itm.access)
}

// Get returns the value stored in the cache. If the item isn't present,
// it will return false.
func (c *Cache[K, V]) Get(k K) (V, bool) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	itm, ok := c.store[k]
	if !ok {
		var zero V
		return zero, false
	}

	c.store[k].access = c.clock.Now().UnixNano()
	c.access.Update(k, itm.access)
	return itm.V, true
}

// Has returns true if the cache has an entry for k. It will not update
// the timestamp on the item.
func (c *Cache[K, V]) Has(k K) bool {
	// Don't need to lock as we don't modify anything.

	c.sanityCheck()

	_, ok := c.store[k]
	return ok
}
```
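A minimal usage sketch for the cache above. The keys and values are arbitrary, the capacity of 2 mirrors the tests that follow, and the import path is assumed from the `git.wntrmute.dev/kyle/goutils` module path referenced elsewhere in this diff.

```go
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/cache/lru"
)

func main() {
	// A string-keyed cache holding at most two entries; the package
	// evicts an entry once the capacity is reached.
	c := lru.NewStringKeyCache[int](2)

	c.Store("raven", 1)
	c.Store("owl", 2)

	if v, ok := c.Get("raven"); ok {
		fmt.Println("raven =", v)
	}

	// Storing a third key triggers an eviction.
	c.Store("goat", 3)
	fmt.Println("entries:", c.Len(), "has owl:", c.Has("owl"))
}
```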
cache/lru/lru_internal_test.go (vendored, new file, 87 lines)

```go
package lru

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

// These tests mirror the MRU-style behavior present in this LRU package
// implementation (eviction removes the most-recently-used entry).
func TestBasicCacheEviction(t *testing.T) {
	mock := clock.NewMock()
	c := NewStringKeyCache[int](2)
	c.clock = mock

	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if c.Len() != 0 {
		t.Fatal("cache should have size 0")
	}

	c.evict()
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	c.Store("raven", 1)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 1 {
		t.Fatalf("store should have length=1, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	c.Store("owl", 2)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	c.Store("goat", 3)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	// Since this implementation evicts the most-recently-used item, inserting
	// "goat" when full evicts "owl" (the most recent at that time).
	mock.Add(time.Second)
	if _, ok := c.Get("owl"); ok {
		t.Fatal("store should not have an entry for owl (MRU-evicted)")
	}
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	mock.Add(time.Second)
	c.Store("elk", 4)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if !c.Has("elk") {
		t.Fatal("store should contain an entry for 'elk'")
	}

	// Before storing elk, keys were: raven (older), goat (newer). Evict MRU -> goat.
	if !c.Has("raven") {
		t.Fatal("store should contain an entry for 'raven'")
	}

	if c.Has("goat") {
		t.Fatal("store should not contain an entry for 'goat'")
	}
}
```
cache/lru/timestamps.go (vendored, new file, 101 lines)

```go
package lru

import (
	"errors"
	"fmt"
	"io"
	"sort"
)

// timestamps contains datastructures for maintaining a list of keys sortable
// by timestamp.

type timestamp[K comparable] struct {
	t int64
	k K
}

type timestamps[K comparable] struct {
	ts  []timestamp[K]
	cap int
}

func newTimestamps[K comparable](icap int) *timestamps[K] {
	return &timestamps[K]{
		ts:  make([]timestamp[K], 0, icap),
		cap: icap,
	}
}

func (ts *timestamps[K]) K(i int) K {
	return ts.ts[i].k
}

func (ts *timestamps[K]) T(i int) int64 {
	return ts.ts[i].t
}

func (ts *timestamps[K]) Len() int {
	return len(ts.ts)
}

func (ts *timestamps[K]) Less(i, j int) bool {
	return ts.ts[i].t > ts.ts[j].t
}

func (ts *timestamps[K]) Swap(i, j int) {
	ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
}

func (ts *timestamps[K]) Find(k K) (int, bool) {
	for i := range ts.ts {
		if ts.ts[i].k == k {
			return i, true
		}
	}
	return -1, false
}

func (ts *timestamps[K]) Update(k K, t int64) bool {
	i, ok := ts.Find(k)
	if !ok {
		ts.ts = append(ts.ts, timestamp[K]{t, k})
		sort.Sort(ts)
		return false
	}

	ts.ts[i].t = t
	sort.Sort(ts)
	return true
}

func (ts *timestamps[K]) ConsistencyCheck() error {
	if !sort.IsSorted(ts) {
		return errors.New("lru: timestamps are not sorted")
	}

	keys := map[K]bool{}
	for i := range ts.ts {
		if keys[ts.ts[i].k] {
			return fmt.Errorf("lru: duplicate key %v detected", ts.ts[i].k)
		}
		keys[ts.ts[i].k] = true
	}

	if len(keys) != len(ts.ts) {
		return fmt.Errorf("lru: timestamp contains %d duplicate keys",
			len(ts.ts)-len(keys))
	}

	return nil
}

func (ts *timestamps[K]) Delete(i int) {
	ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
}

func (ts *timestamps[K]) Dump(w io.Writer) {
	for i := range ts.ts {
		fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
	}
}
```
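Because `Less` above compares with `>`, `sort.Sort` keeps the slice ordered newest-first, so index 0 always holds the most recent key; that is why `evict` in `lru.go`, which removes index 0, drops the most recently used entry, the quirk the tests below call out. A self-contained sketch of the same ordering, independent of this package:

```go
package main

import (
	"fmt"
	"sort"
)

type stamp struct {
	key string
	t   int64
}

func main() {
	// Same comparison as timestamps.Less: larger (newer) times sort first.
	stamps := []stamp{{"raven", 1}, {"owl", 3}, {"goat", 2}}
	sort.Slice(stamps, func(i, j int) bool { return stamps[i].t > stamps[j].t })

	// Index 0 now holds the newest key, so an evictor that always removes
	// index 0 removes the most recently used entry.
	fmt.Println(stamps[0].key) // owl
}
```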
cache/lru/timestamps_internal_test.go (vendored, new file, 50 lines)

```go
package lru

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

// These tests validate timestamps ordering semantics for the LRU package.
// Note: The LRU timestamps are sorted with most-recent-first (descending by t).
func TestTimestamps(t *testing.T) {
	ts := newTimestamps[string](3)
	mock := clock.NewMock()

	// raven
	ts.Update("raven", mock.Now().UnixNano())

	// raven, owl
	mock.Add(time.Millisecond)
	ts.Update("owl", mock.Now().UnixNano())

	// raven, owl, goat
	mock.Add(time.Second)
	ts.Update("goat", mock.Now().UnixNano())

	if err := ts.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	// make owl the most recent
	mock.Add(time.Millisecond)
	ts.Update("owl", mock.Now().UnixNano())
	if err := ts.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	// For LRU timestamps: most recent first. Expected order: owl, goat, raven.
	if ts.K(0) != "owl" {
		t.Fatalf("first key should be owl, have %s", ts.K(0))
	}

	if ts.K(1) != "goat" {
		t.Fatalf("second key should be goat, have %s", ts.K(1))
	}

	if ts.K(2) != "raven" {
		t.Fatalf("third key should be raven, have %s", ts.K(2))
	}
}
```
178
cache/mru/mru.go
vendored
Normal file
178
cache/mru/mru.go
vendored
Normal file
@@ -0,0 +1,178 @@
|
||||
package mru
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
)
|
||||
|
||||
type item[V any] struct {
|
||||
V V
|
||||
access int64
|
||||
}
|
||||
|
||||
// A Cache is a map that retains a limited number of items. It must be
// initialized with New, providing a maximum capacity for the cache.
// Only the most recently used items are retained.
type Cache[K comparable, V any] struct {
    store  map[K]*item[V]
    access *timestamps[K]
    cap    int
    clock  clock.Clock
    // All public methods that have the possibility of modifying the
    // cache should lock it.
    mtx *sync.Mutex
}

// New must be used to create a new Cache.
func New[K comparable, V any](icap int) *Cache[K, V] {
    return &Cache[K, V]{
        store:  map[K]*item[V]{},
        access: newTimestamps[K](icap),
        cap:    icap,
        clock:  clock.New(),
        mtx:    &sync.Mutex{},
    }
}

// StringKeyCache is a convenience wrapper for cache keyed by string.
type StringKeyCache[V any] struct {
    *Cache[string, V]
}

// NewStringKeyCache creates a new MRU cache keyed by string.
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
    return &StringKeyCache[V]{Cache: New[string, V](icap)}
}

func (c *Cache[K, V]) lock() {
    c.mtx.Lock()
}

func (c *Cache[K, V]) unlock() {
    c.mtx.Unlock()
}

// Len returns the number of items currently in the cache.
func (c *Cache[K, V]) Len() int {
    return len(c.store)
}

// evict should remove the least-recently-used cache item.
func (c *Cache[K, V]) evict() {
    if c.access.Len() == 0 {
        return
    }

    k := c.access.K(0)
    c.evictKey(k)
}

// evictKey should remove the entry given by the key item.
func (c *Cache[K, V]) evictKey(k K) {
    delete(c.store, k)
    i, ok := c.access.Find(k)
    if !ok {
        return
    }

    c.access.Delete(i)
}

func (c *Cache[K, V]) sanityCheck() {
    if len(c.store) != c.access.Len() {
        panic(fmt.Sprintf("MRU cache is out of sync; store len = %d, access len = %d",
            len(c.store), c.access.Len()))
    }
}

// ConsistencyCheck runs a series of checks to ensure that the cache's
// data structures are consistent. It is not normally required, and it
// is primarily used in testing.
func (c *Cache[K, V]) ConsistencyCheck() error {
    c.lock()
    defer c.unlock()
    if err := c.access.ConsistencyCheck(); err != nil {
        return err
    }

    if len(c.store) != c.access.Len() {
        return fmt.Errorf("mru: cache is out of sync; store len = %d, access len = %d",
            len(c.store), c.access.Len())
    }

    for i := range c.access.ts {
        itm, ok := c.store[c.access.K(i)]
        if !ok {
            return errors.New("mru: key in access is not in store")
        }

        if c.access.T(i) != itm.access {
            return fmt.Errorf("timestamps are out of sync (%d != %d)",
                itm.access, c.access.T(i))
        }
    }

    if !sort.IsSorted(c.access) {
        return errors.New("mru: timestamps aren't sorted")
    }

    return nil
}

// Store adds the value v to the cache under the k.
func (c *Cache[K, V]) Store(k K, v V) {
    c.lock()
    defer c.unlock()

    c.sanityCheck()

    if len(c.store) == c.cap {
        c.evict()
    }

    if _, ok := c.store[k]; ok {
        c.evictKey(k)
    }

    itm := &item[V]{
        V:      v,
        access: c.clock.Now().UnixNano(),
    }

    c.store[k] = itm
    c.access.Update(k, itm.access)
}

// Get returns the value stored in the cache. If the item isn't present,
// it will return false.
func (c *Cache[K, V]) Get(k K) (V, bool) {
    c.lock()
    defer c.unlock()

    c.sanityCheck()

    itm, ok := c.store[k]
    if !ok {
        var zero V
        return zero, false
    }

    c.store[k].access = c.clock.Now().UnixNano()
    c.access.Update(k, itm.access)
    return itm.V, true
}

// Has returns true if the cache has an entry for k. It will not update
// the timestamp on the item.
func (c *Cache[K, V]) Has(k K) bool {
    // Don't need to lock as we don't modify anything.

    c.sanityCheck()

    _, ok := c.store[k]
    return ok
}
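The cache surface above is small: New (or NewStringKeyCache), Store, Get, Has and Len. A minimal usage sketch, assuming the package is imported from this repository's cache/mru path and that item holds a value plus its last-access timestamp, as the code above implies:

package main

import (
    "fmt"

    "git.wntrmute.dev/kyle/goutils/cache/mru"
)

func main() {
    // Capacity of 2: storing a third key evicts the least-recently-used one.
    c := mru.NewStringKeyCache[int](2)
    c.Store("raven", 1)
    c.Store("owl", 2)

    if v, ok := c.Get("raven"); ok { // touching "raven" makes "owl" the LRU entry
        fmt.Println("raven =", v)
    }

    c.Store("goat", 3) // evicts "owl"

    fmt.Println(c.Has("owl"), c.Len()) // false 2
}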
cache/mru/mru_internal_test.go (vendored, new file, 92 lines added)
@@ -0,0 +1,92 @@
package mru

import (
    "testing"
    "time"

    "github.com/benbjohnson/clock"
)

func TestBasicCacheEviction(t *testing.T) {
    mock := clock.NewMock()
    c := NewStringKeyCache[int](2)
    c.clock = mock

    if err := c.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    if c.Len() != 0 {
        t.Fatal("cache should have size 0")
    }

    c.evict()
    if err := c.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    c.Store("raven", 1)
    if err := c.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    if len(c.store) != 1 {
        t.Fatalf("store should have length=1, have length=%d", len(c.store))
    }

    mock.Add(time.Second)
    c.Store("owl", 2)
    if err := c.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    if len(c.store) != 2 {
        t.Fatalf("store should have length=2, have length=%d", len(c.store))
    }

    mock.Add(time.Second)
    c.Store("goat", 3)
    if err := c.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    if len(c.store) != 2 {
        t.Fatalf("store should have length=2, have length=%d", len(c.store))
    }

    mock.Add(time.Second)
    v, ok := c.Get("owl")
    if !ok {
        t.Fatal("store should have an entry for owl")
    }
    if err := c.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    itm := v
    if err := c.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    if itm != 2 {
        t.Fatalf("stored item should be 2, have %d", itm)
    }

    mock.Add(time.Second)
    c.Store("elk", 4)
    if err := c.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    if !c.Has("elk") {
        t.Fatal("store should contain an entry for 'elk'")
    }

    if !c.Has("owl") {
        t.Fatal("store should contain an entry for 'owl'")
    }

    if c.Has("goat") {
        t.Fatal("store should not contain an entry for 'goat'")
    }
}
cache/mru/timestamps.go (vendored, new file, 101 lines added)
@@ -0,0 +1,101 @@
package mru

import (
    "errors"
    "fmt"
    "io"
    "sort"
)

// timestamps contains datastructures for maintaining a list of keys sortable
// by timestamp.

type timestamp[K comparable] struct {
    t int64
    k K
}

type timestamps[K comparable] struct {
    ts  []timestamp[K]
    cap int
}

func newTimestamps[K comparable](icap int) *timestamps[K] {
    return &timestamps[K]{
        ts:  make([]timestamp[K], 0, icap),
        cap: icap,
    }
}

func (ts *timestamps[K]) K(i int) K {
    return ts.ts[i].k
}

func (ts *timestamps[K]) T(i int) int64 {
    return ts.ts[i].t
}

func (ts *timestamps[K]) Len() int {
    return len(ts.ts)
}

func (ts *timestamps[K]) Less(i, j int) bool {
    return ts.ts[i].t < ts.ts[j].t
}

func (ts *timestamps[K]) Swap(i, j int) {
    ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
}

func (ts *timestamps[K]) Find(k K) (int, bool) {
    for i := range ts.ts {
        if ts.ts[i].k == k {
            return i, true
        }
    }
    return -1, false
}

func (ts *timestamps[K]) Update(k K, t int64) bool {
    i, ok := ts.Find(k)
    if !ok {
        ts.ts = append(ts.ts, timestamp[K]{t, k})
        sort.Sort(ts)
        return false
    }

    ts.ts[i].t = t
    sort.Sort(ts)
    return true
}

func (ts *timestamps[K]) ConsistencyCheck() error {
    if !sort.IsSorted(ts) {
        return errors.New("mru: timestamps are not sorted")
    }

    keys := map[K]bool{}
    for i := range ts.ts {
        if keys[ts.ts[i].k] {
            return fmt.Errorf("duplicate key %v detected", ts.ts[i].k)
        }
        keys[ts.ts[i].k] = true
    }

    if len(keys) != len(ts.ts) {
        return fmt.Errorf("mru: timestamp contains %d duplicate keys",
            len(ts.ts)-len(keys))
    }

    return nil
}

func (ts *timestamps[K]) Delete(i int) {
    ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
}

func (ts *timestamps[K]) Dump(w io.Writer) {
    for i := range ts.ts {
        fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
    }
}
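Update re-sorts the slice on every call, so entries stay ordered oldest-to-newest and K(0) is always the least-recently-used key; that is exactly the index Cache.evict reads. A small sketch of the invariant (illustrative only; these identifiers are unexported, so it would have to live inside package mru, as the tests below do):

ts := newTimestamps[string](3)
ts.Update("a", 100) // [a]
ts.Update("b", 200) // [a, b]
ts.Update("a", 300) // re-sorted: [b, a]
_ = ts.K(0)         // "b": the entry Cache.evict would remove next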
cache/mru/timestamps_internal_test.go (vendored, new file, 49 lines added)
@@ -0,0 +1,49 @@
package mru

import (
    "testing"
    "time"

    "github.com/benbjohnson/clock"
)

func TestTimestamps(t *testing.T) {
    ts := newTimestamps[string](3)
    mock := clock.NewMock()

    // raven
    ts.Update("raven", mock.Now().UnixNano())

    // raven, owl
    mock.Add(time.Millisecond)

    ts.Update("owl", mock.Now().UnixNano())

    // raven, owl, goat
    mock.Add(time.Second)
    ts.Update("goat", mock.Now().UnixNano())

    if err := ts.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }
    mock.Add(time.Millisecond)

    // raven, goat, owl
    ts.Update("owl", mock.Now().UnixNano())
    if err := ts.ConsistencyCheck(); err != nil {
        t.Fatal(err)
    }

    // at this point, the keys should be raven, goat, owl.
    if ts.K(0) != "raven" {
        t.Fatalf("first key should be raven, have %s", ts.K(0))
    }

    if ts.K(1) != "goat" {
        t.Fatalf("second key should be goat, have %s", ts.K(1))
    }

    if ts.K(2) != "owl" {
        t.Fatalf("third key should be owl, have %s", ts.K(2))
    }
}
certlib/bundler/bundler.go (new file, 677 lines added)
@@ -0,0 +1,677 @@
package bundler
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
)
|
||||
|
||||
const defaultFileMode = 0644
|
||||
|
||||
// Config represents the top-level YAML configuration.
|
||||
type Config struct {
|
||||
Config struct {
|
||||
Hashes string `yaml:"hashes"`
|
||||
Expiry string `yaml:"expiry"`
|
||||
} `yaml:"config"`
|
||||
Chains map[string]ChainGroup `yaml:"chains"`
|
||||
}
|
||||
|
||||
// ChainGroup represents a named group of certificate chains.
|
||||
type ChainGroup struct {
|
||||
Certs []CertChain `yaml:"certs"`
|
||||
Outputs Outputs `yaml:"outputs"`
|
||||
}
|
||||
|
||||
// CertChain represents a root certificate and its intermediates.
|
||||
type CertChain struct {
|
||||
Root string `yaml:"root"`
|
||||
Intermediates []string `yaml:"intermediates"`
|
||||
}
|
||||
|
||||
// Outputs defines output format options.
|
||||
type Outputs struct {
|
||||
IncludeSingle bool `yaml:"include_single"`
|
||||
IncludeIndividual bool `yaml:"include_individual"`
|
||||
Manifest bool `yaml:"manifest"`
|
||||
Formats []string `yaml:"formats"`
|
||||
Encoding string `yaml:"encoding"`
|
||||
}
|
||||
|
||||
var formatExtensions = map[string]string{
|
||||
"zip": ".zip",
|
||||
"tgz": ".tar.gz",
|
||||
}
|
||||
|
||||
// Run performs the bundling operation given a config file path and an output directory.
|
||||
func Run(configFile string, outputDir string) error {
|
||||
if configFile == "" {
|
||||
return errors.New("configuration file required")
|
||||
}
|
||||
|
||||
cfg, err := loadConfig(configFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
expiryDuration := 365 * 24 * time.Hour
|
||||
if cfg.Config.Expiry != "" {
|
||||
expiryDuration, err = parseDuration(cfg.Config.Expiry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing expiry: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(outputDir, 0750); err != nil {
|
||||
return fmt.Errorf("creating output directory: %w", err)
|
||||
}
|
||||
|
||||
totalFormats := 0
|
||||
for _, group := range cfg.Chains {
|
||||
totalFormats += len(group.Outputs.Formats)
|
||||
}
|
||||
createdFiles := make([]string, 0, totalFormats)
|
||||
for groupName, group := range cfg.Chains {
|
||||
files, perr := processChainGroup(groupName, group, expiryDuration, outputDir)
|
||||
if perr != nil {
|
||||
return fmt.Errorf("processing chain group %s: %w", groupName, perr)
|
||||
}
|
||||
createdFiles = append(createdFiles, files...)
|
||||
}
|
||||
|
||||
if cfg.Config.Hashes != "" {
|
||||
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
|
||||
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
|
||||
return fmt.Errorf("generating hash file: %w", gerr)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
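Run is the whole pipeline: load the YAML config, bundle each chain group into the requested archive formats, then write the optional hash file. Below is a sketch of driving it with a hypothetical configuration; the file names, group name and certificate paths are invented for illustration, but the keys match the yaml tags on Config, ChainGroup and Outputs above.

package main

import (
    "log"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib/bundler"
)

// Hypothetical configuration matching the Config/ChainGroup/Outputs fields above.
const exampleConfig = `config:
  hashes: bundle.sha256
  expiry: 1y
chains:
  core-certs:
    certs:
      - root: pems/root.pem
        intermediates:
          - pems/int1.pem
    outputs:
      include_single: true
      include_individual: true
      manifest: true
      encoding: pem
      formats:
        - zip
        - tgz
`

func main() {
    if err := os.WriteFile("bundle.yaml", []byte(exampleConfig), 0o600); err != nil {
        log.Fatal(err)
    }
    // Produces out/core-certs.zip, out/core-certs.tar.gz and out/bundle.sha256.
    if err := bundler.Run("bundle.yaml", "out"); err != nil {
        log.Fatal(err)
    }
}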
|
||||
func loadConfig(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
|
||||
return nil, uerr
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func parseDuration(s string) (time.Duration, error) {
|
||||
// Support simple formats like "1y", "6m", "30d"
|
||||
if len(s) < 2 {
|
||||
return 0, fmt.Errorf("invalid duration format: %s", s)
|
||||
}
|
||||
|
||||
unit := s[len(s)-1]
|
||||
value := s[:len(s)-1]
|
||||
|
||||
var multiplier time.Duration
|
||||
switch unit {
|
||||
case 'y', 'Y':
|
||||
multiplier = 365 * 24 * time.Hour
|
||||
case 'm', 'M':
|
||||
multiplier = 30 * 24 * time.Hour
|
||||
case 'd', 'D':
|
||||
multiplier = 24 * time.Hour
|
||||
default:
|
||||
return time.ParseDuration(s)
|
||||
}
|
||||
|
||||
var num int
|
||||
_, err := fmt.Sscanf(value, "%d", &num)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid duration value: %s", s)
|
||||
}
|
||||
|
||||
return time.Duration(num) * multiplier, nil
|
||||
}
|
||||
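Note the unit handling in parseDuration: 'm' means months of 30 days, not minutes, and any string that does not end in y/m/d falls through to time.ParseDuration. A few illustrative values (parseDuration is unexported, so this sketch would live inside the bundler package, for example in a test):

d, _ := parseDuration("1y")  // 8760h0m0s (365 days)
d, _ = parseDuration("6m")   // 4320h0m0s (6 x 30 days), months rather than minutes
d, _ = parseDuration("30d")  // 720h0m0s
d, _ = parseDuration("90s")  // falls back to time.ParseDuration: 1m30s
_ = d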
|
||||
func processChainGroup(
|
||||
groupName string,
|
||||
group ChainGroup,
|
||||
expiryDuration time.Duration,
|
||||
outputDir string,
|
||||
) ([]string, error) {
|
||||
// Default encoding to "pem" if not specified
|
||||
encoding := group.Outputs.Encoding
|
||||
if encoding == "" {
|
||||
encoding = "pem"
|
||||
}
|
||||
|
||||
// Collect certificates from all chains in the group
|
||||
singleFileCerts, individualCerts, sourcePaths, err := loadAndCollectCerts(
|
||||
group.Certs,
|
||||
group.Outputs,
|
||||
expiryDuration,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare files for inclusion in archives
|
||||
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, sourcePaths, group.Outputs, encoding)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create archives for the entire group
|
||||
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles, outputDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
|
||||
func loadAndCollectCerts(
|
||||
chains []CertChain,
|
||||
outputs Outputs,
|
||||
expiryDuration time.Duration,
|
||||
) ([]*x509.Certificate, []certWithPath, []string, error) {
|
||||
var singleFileCerts []*x509.Certificate
|
||||
var individualCerts []certWithPath
|
||||
var sourcePaths []string
|
||||
|
||||
for _, chain := range chains {
|
||||
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
|
||||
if cerr != nil {
|
||||
return nil, nil, nil, cerr
|
||||
}
|
||||
if len(s) > 0 {
|
||||
singleFileCerts = append(singleFileCerts, s...)
|
||||
}
|
||||
if len(i) > 0 {
|
||||
individualCerts = append(individualCerts, i...)
|
||||
}
|
||||
// Record source paths for timestamp preservation
|
||||
// Only append when loading succeeded
|
||||
sourcePaths = append(sourcePaths, chain.Root)
|
||||
sourcePaths = append(sourcePaths, chain.Intermediates...)
|
||||
}
|
||||
|
||||
return singleFileCerts, individualCerts, sourcePaths, nil
|
||||
}
|
||||
|
||||
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
|
||||
func collectFromChain(
|
||||
chain CertChain,
|
||||
outputs Outputs,
|
||||
expiryDuration time.Duration,
|
||||
) (
|
||||
[]*x509.Certificate,
|
||||
[]certWithPath,
|
||||
error,
|
||||
) {
|
||||
var single []*x509.Certificate
|
||||
var indiv []certWithPath
|
||||
|
||||
// Load root certificate
|
||||
rootCert, rerr := certlib.LoadCertificate(chain.Root)
|
||||
if rerr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
|
||||
}
|
||||
|
||||
// Check expiry for root
|
||||
checkExpiry(chain.Root, rootCert, expiryDuration)
|
||||
|
||||
// Add root to collections if needed
|
||||
if outputs.IncludeSingle {
|
||||
single = append(single, rootCert)
|
||||
}
|
||||
if outputs.IncludeIndividual {
|
||||
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
|
||||
}
|
||||
|
||||
// Load and validate intermediates
|
||||
for _, intPath := range chain.Intermediates {
|
||||
intCert, lerr := certlib.LoadCertificate(intPath)
|
||||
if lerr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
|
||||
}
|
||||
|
||||
// Validate that intermediate is signed by root
|
||||
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
|
||||
return nil, nil, fmt.Errorf(
|
||||
"intermediate %s is not properly signed by root %s: %w",
|
||||
intPath,
|
||||
chain.Root,
|
||||
sigErr,
|
||||
)
|
||||
}
|
||||
|
||||
// Check expiry for intermediate
|
||||
checkExpiry(intPath, intCert, expiryDuration)
|
||||
|
||||
// Add intermediate to collections if needed
|
||||
if outputs.IncludeSingle {
|
||||
single = append(single, intCert)
|
||||
}
|
||||
if outputs.IncludeIndividual {
|
||||
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
|
||||
}
|
||||
}
|
||||
|
||||
return single, indiv, nil
|
||||
}
|
||||
|
||||
// prepareArchiveFiles prepares all files to be included in archives.
|
||||
func prepareArchiveFiles(
|
||||
singleFileCerts []*x509.Certificate,
|
||||
individualCerts []certWithPath,
|
||||
sourcePaths []string,
|
||||
outputs Outputs,
|
||||
encoding string,
|
||||
) ([]fileEntry, error) {
|
||||
var archiveFiles []fileEntry
|
||||
|
||||
// Track used filenames to avoid collisions inside archives
|
||||
usedNames := make(map[string]int)
|
||||
|
||||
// Handle a single bundle file
|
||||
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
|
||||
bundleTime := maxModTime(sourcePaths)
|
||||
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
|
||||
}
|
||||
for i := range files {
|
||||
files[i].name = makeUniqueName(files[i].name, usedNames)
|
||||
files[i].modTime = bundleTime
|
||||
// Best-effort: we do not have a portable birth/creation time.
|
||||
// Use the same timestamp for created time to track deterministically.
|
||||
files[i].createTime = bundleTime
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
|
||||
// Handle individual files
|
||||
if outputs.IncludeIndividual {
|
||||
for _, cp := range individualCerts {
|
||||
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
|
||||
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
|
||||
}
|
||||
mt := fileModTime(cp.path)
|
||||
for i := range files {
|
||||
files[i].name = makeUniqueName(files[i].name, usedNames)
|
||||
files[i].modTime = mt
|
||||
files[i].createTime = mt
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate manifest if requested
|
||||
if outputs.Manifest {
|
||||
manifestContent := generateManifest(archiveFiles)
|
||||
manifestName := makeUniqueName("MANIFEST", usedNames)
|
||||
mt := maxModTime(sourcePaths)
|
||||
archiveFiles = append(archiveFiles, fileEntry{
|
||||
name: manifestName,
|
||||
content: manifestContent,
|
||||
modTime: mt,
|
||||
createTime: mt,
|
||||
})
|
||||
}
|
||||
|
||||
return archiveFiles, nil
|
||||
}
|
||||
|
||||
// createArchiveFiles creates archive files in the specified formats.
|
||||
func createArchiveFiles(
|
||||
groupName string,
|
||||
formats []string,
|
||||
archiveFiles []fileEntry,
|
||||
outputDir string,
|
||||
) ([]string, error) {
|
||||
createdFiles := make([]string, 0, len(formats))
|
||||
|
||||
for _, format := range formats {
|
||||
ext, ok := formatExtensions[format]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
archivePath := filepath.Join(outputDir, groupName+ext)
|
||||
switch format {
|
||||
case "zip":
|
||||
if err := createZipArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create zip archive: %w", err)
|
||||
}
|
||||
case "tgz":
|
||||
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
createdFiles = append(createdFiles, archivePath)
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
|
||||
now := time.Now()
|
||||
expiryThreshold := now.Add(expiryDuration)
|
||||
|
||||
if cert.NotAfter.Before(expiryThreshold) {
|
||||
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
|
||||
if daysUntilExpiry < 0 {
|
||||
fmt.Fprintf(
|
||||
os.Stderr,
|
||||
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
|
||||
path,
|
||||
-daysUntilExpiry,
|
||||
)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fileEntry struct {
|
||||
name string
|
||||
content []byte
|
||||
modTime time.Time
|
||||
createTime time.Time
|
||||
}
|
||||
|
||||
type certWithPath struct {
|
||||
cert *x509.Certificate
|
||||
path string
|
||||
}
|
||||
|
||||
// encodeCertsToFiles converts certificates to file entries based on encoding type
|
||||
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
|
||||
func encodeCertsToFiles(
|
||||
certs []*x509.Certificate,
|
||||
baseName string,
|
||||
encoding string,
|
||||
isSingle bool,
|
||||
) ([]fileEntry, error) {
|
||||
var files []fileEntry
|
||||
|
||||
switch encoding {
|
||||
case "pem":
|
||||
pemContent := encodeCertsToPEM(certs)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
case "der":
|
||||
if isSingle {
|
||||
// For single file in DER, concatenate all cert DER bytes
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else if len(certs) > 0 {
|
||||
// Individual DER file (should only have one cert)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
case "both":
|
||||
// Add PEM version
|
||||
pemContent := encodeCertsToPEM(certs)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
// Add DER version
|
||||
if isSingle {
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else if len(certs) > 0 {
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// encodeCertsToPEM encodes certificates to PEM format.
|
||||
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
|
||||
var pemContent []byte
|
||||
for _, cert := range certs {
|
||||
pemBlock := &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
}
|
||||
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
|
||||
}
|
||||
return pemContent
|
||||
}
|
||||
|
||||
func generateManifest(files []fileEntry) []byte {
|
||||
// Build a sorted list of files by filename to ensure deterministic manifest ordering
|
||||
sorted := make([]fileEntry, 0, len(files))
|
||||
for _, f := range files {
|
||||
// Defensive: skip any existing manifest entry
|
||||
if f.name == "MANIFEST" {
|
||||
continue
|
||||
}
|
||||
sorted = append(sorted, f)
|
||||
}
|
||||
sort.Slice(sorted, func(i, j int) bool { return sorted[i].name < sorted[j].name })
|
||||
|
||||
var manifest strings.Builder
|
||||
for _, file := range sorted {
|
||||
hash := sha256.Sum256(file.content)
|
||||
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
|
||||
}
|
||||
return []byte(manifest.String())
|
||||
}
|
||||
|
||||
// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
|
||||
func closeWithErr(baseErr error, closers ...io.Closer) error {
|
||||
for _, c := range closers {
|
||||
if c == nil {
|
||||
continue
|
||||
}
|
||||
if cerr := c.Close(); cerr != nil {
|
||||
baseErr = errors.Join(baseErr, cerr)
|
||||
}
|
||||
}
|
||||
return baseErr
|
||||
}
|
||||
|
||||
func createZipArchive(path string, files []fileEntry) error {
|
||||
f, zerr := os.Create(path)
|
||||
if zerr != nil {
|
||||
return zerr
|
||||
}
|
||||
|
||||
w := zip.NewWriter(f)
|
||||
|
||||
for _, file := range files {
|
||||
hdr := &zip.FileHeader{
|
||||
Name: file.name,
|
||||
Method: zip.Deflate,
|
||||
}
|
||||
if !file.modTime.IsZero() {
|
||||
hdr.SetModTime(file.modTime)
|
||||
}
|
||||
fw, werr := w.CreateHeader(hdr)
|
||||
if werr != nil {
|
||||
return closeWithErr(werr, w, f)
|
||||
}
|
||||
if _, werr = fw.Write(file.content); werr != nil {
|
||||
return closeWithErr(werr, w, f)
|
||||
}
|
||||
}
|
||||
|
||||
// Check errors on close operations
|
||||
if cerr := w.Close(); cerr != nil {
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func createTarGzArchive(path string, files []fileEntry) error {
|
||||
f, terr := os.Create(path)
|
||||
if terr != nil {
|
||||
return terr
|
||||
}
|
||||
|
||||
gw := gzip.NewWriter(f)
|
||||
tw := tar.NewWriter(gw)
|
||||
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.name,
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Mode: defaultFileMode,
|
||||
Size: int64(len(file.content)),
|
||||
ModTime: func() time.Time {
|
||||
if file.modTime.IsZero() {
|
||||
return time.Now()
|
||||
}
|
||||
return file.modTime
|
||||
}(),
|
||||
}
|
||||
// Set additional times if supported
|
||||
hdr.AccessTime = hdr.ModTime
|
||||
if !file.createTime.IsZero() {
|
||||
hdr.ChangeTime = file.createTime
|
||||
} else {
|
||||
hdr.ChangeTime = hdr.ModTime
|
||||
}
|
||||
if herr := tw.WriteHeader(hdr); herr != nil {
|
||||
return closeWithErr(herr, tw, gw, f)
|
||||
}
|
||||
if _, werr := tw.Write(file.content); werr != nil {
|
||||
return closeWithErr(werr, tw, gw, f)
|
||||
}
|
||||
}
|
||||
|
||||
// Check errors on close operations in the correct order
|
||||
if cerr := tw.Close(); cerr != nil {
|
||||
_ = gw.Close()
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
if cerr := gw.Close(); cerr != nil {
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func generateHashFile(path string, files []string) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for _, file := range files {
|
||||
data, rerr := os.ReadFile(file)
|
||||
if rerr != nil {
|
||||
return rerr
|
||||
}
|
||||
|
||||
hash := sha256.Sum256(data)
|
||||
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeUniqueName ensures that each file name within the archive is unique by appending
|
||||
// an incremental numeric suffix before the extension when collisions occur.
|
||||
// Example: "root.pem" -> "root-2.pem", "root-3.pem", etc.
|
||||
func makeUniqueName(name string, used map[string]int) string {
|
||||
// If unused, mark and return as-is
|
||||
if _, ok := used[name]; !ok {
|
||||
used[name] = 1
|
||||
return name
|
||||
}
|
||||
|
||||
ext := filepath.Ext(name)
|
||||
base := strings.TrimSuffix(name, ext)
|
||||
// Track a counter per base+ext key
|
||||
key := base + ext
|
||||
counter := max(used[key], 1)
|
||||
for {
|
||||
counter++
|
||||
candidate := fmt.Sprintf("%s-%d%s", base, counter, ext)
|
||||
if _, exists := used[candidate]; !exists {
|
||||
used[key] = counter
|
||||
used[candidate] = 1
|
||||
return candidate
|
||||
}
|
||||
}
|
||||
}
|
||||
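makeUniqueName is what keeps repeated base names from clobbering each other inside a single archive. A quick sketch of the behaviour (internal to the package, shown here for illustration):

used := map[string]int{}
fmt.Println(makeUniqueName("root.pem", used)) // root.pem
fmt.Println(makeUniqueName("root.pem", used)) // root-2.pem
fmt.Println(makeUniqueName("root.pem", used)) // root-3.pem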
|
||||
// fileModTime returns the file's modification time, or time.Now() if stat fails.
|
||||
func fileModTime(path string) time.Time {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return time.Now()
|
||||
}
|
||||
return fi.ModTime()
|
||||
}
|
||||
|
||||
// maxModTime returns the latest modification time across provided paths.
|
||||
// If the list is empty or stats fail, returns time.Now().
|
||||
func maxModTime(paths []string) time.Time {
|
||||
var zero time.Time
|
||||
maxTime := zero
|
||||
for _, p := range paths {
|
||||
fi, err := os.Stat(p)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
mt := fi.ModTime()
|
||||
if maxTime.IsZero() || mt.After(maxTime) {
|
||||
maxTime = mt
|
||||
}
|
||||
}
|
||||
if maxTime.IsZero() {
|
||||
return time.Now()
|
||||
}
|
||||
return maxTime
|
||||
}
|
||||
certlib/certerr/doc.go (new file, 33 lines added)
@@ -0,0 +1,33 @@
// Package certerr provides typed errors and helpers for certificate-related
// operations across the repository. It standardizes error construction and
// matching so callers can reliably branch on error source/kind using the
// Go 1.13+ `errors.Is` and `errors.As` helpers.
//
// Guidelines
//   - Always wrap underlying causes using the helper constructors or with
//     fmt.Errorf("context: %w", err).
//   - Do not include sensitive data (keys, passwords, tokens) in error
//     messages; add only non-sensitive, actionable context.
//   - Prefer programmatic checks via errors.Is (for sentinel errors) and
//     errors.As (to retrieve *certerr.Error) rather than relying on error
//     string contents.
//
// Typical usage
//
//    if err := doParse(); err != nil {
//        return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
//    }
//
// Callers may branch on error kinds and sources:
//
//    var e *certerr.Error
//    if errors.As(err, &e) {
//        switch e.Kind {
//        case certerr.KindParse:
//            // handle parse error
//        }
//    }
//
// Sentinel errors are provided for common conditions like
// `certerr.ErrEncryptedPrivateKey` and can be matched with `errors.Is`.
package certerr
@@ -37,43 +37,84 @@ const (
|
||||
ErrorSourceKeypair ErrorSourceType = 5
|
||||
)
|
||||
|
||||
// InvalidPEMType is used to indicate that we were expecting one type of PEM
|
||||
// ErrorKind is a broad classification describing what went wrong.
|
||||
type ErrorKind uint8
|
||||
|
||||
const (
|
||||
KindParse ErrorKind = iota + 1
|
||||
KindDecode
|
||||
KindVerify
|
||||
KindLoad
|
||||
)
|
||||
|
||||
func (k ErrorKind) String() string {
|
||||
switch k {
|
||||
case KindParse:
|
||||
return "parse"
|
||||
case KindDecode:
|
||||
return "decode"
|
||||
case KindVerify:
|
||||
return "verify"
|
||||
case KindLoad:
|
||||
return "load"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// Error is a typed, wrapped error with structured context for programmatic checks.
|
||||
// It implements error and supports errors.Is/As via Unwrap.
|
||||
type Error struct {
|
||||
Source ErrorSourceType // which domain produced the error (certificate, private key, etc.)
|
||||
Kind ErrorKind // operation category (parse, decode, verify, load)
|
||||
Op string // optional operation or function name
|
||||
Err error // wrapped cause
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
// Keep message format consistent with existing helpers: "failed to <kind> <source>: <err>"
|
||||
// Do not include Op by default to preserve existing output expectations.
|
||||
return fmt.Sprintf("failed to %s %s: %v", e.Kind.String(), e.Source.String(), e.Err)
|
||||
}
|
||||
|
||||
func (e *Error) Unwrap() error { return e.Err }
|
||||
|
||||
// InvalidPEMTypeError is used to indicate that we were expecting one type of PEM
|
||||
// file, but saw another.
|
||||
type InvalidPEMType struct {
|
||||
type InvalidPEMTypeError struct {
|
||||
have string
|
||||
want []string
|
||||
}
|
||||
|
||||
func (err *InvalidPEMType) Error() string {
|
||||
func (err *InvalidPEMTypeError) Error() string {
|
||||
if len(err.want) == 1 {
|
||||
return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
|
||||
} else {
|
||||
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
|
||||
}
|
||||
return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
|
||||
}
|
||||
|
||||
// ErrInvalidPEMType returns a new InvalidPEMType error.
|
||||
// ErrInvalidPEMType returns a new InvalidPEMTypeError error.
|
||||
func ErrInvalidPEMType(have string, want ...string) error {
|
||||
return &InvalidPEMType{
|
||||
return &InvalidPEMTypeError{
|
||||
have: have,
|
||||
want: want,
|
||||
}
|
||||
}
|
||||
|
||||
func LoadingError(t ErrorSourceType, err error) error {
|
||||
return fmt.Errorf("failed to load %s from disk: %w", t, err)
|
||||
return &Error{Source: t, Kind: KindLoad, Err: err}
|
||||
}
|
||||
|
||||
func ParsingError(t ErrorSourceType, err error) error {
|
||||
return fmt.Errorf("failed to parse %s: %w", t, err)
|
||||
return &Error{Source: t, Kind: KindParse, Err: err}
|
||||
}
|
||||
|
||||
func DecodeError(t ErrorSourceType, err error) error {
|
||||
return fmt.Errorf("failed to decode %s: %w", t, err)
|
||||
return &Error{Source: t, Kind: KindDecode, Err: err}
|
||||
}
|
||||
|
||||
func VerifyError(t ErrorSourceType, err error) error {
|
||||
return fmt.Errorf("failed to verify %s: %w", t, err)
|
||||
return &Error{Source: t, Kind: KindVerify, Err: err}
|
||||
}
|
||||
|
||||
var ErrEncryptedPrivateKey = errors.New("private key is encrypted")
|
||||
|
||||
certlib/certerr/errors_test.go (new file, 56 lines added)
@@ -0,0 +1,56 @@
//nolint:testpackage // keep tests in the same package for internal symbol access
package certerr

import (
    "errors"
    "strings"
    "testing"
)

func TestTypedErrorWrappingAndFormatting(t *testing.T) {
    cause := errors.New("bad data")
    err := DecodeError(ErrorSourceCertificate, cause)

    // Ensure we can retrieve the typed error
    var e *Error
    if !errors.As(err, &e) {
        t.Fatalf("expected errors.As to retrieve *certerr.Error, got %T", err)
    }
    if e.Kind != KindDecode {
        t.Fatalf("unexpected kind: %v", e.Kind)
    }
    if e.Source != ErrorSourceCertificate {
        t.Fatalf("unexpected source: %v", e.Source)
    }

    // Check message format (no trailing punctuation enforced by content)
    msg := e.Error()
    if !strings.Contains(msg, "failed to decode certificate") || !strings.Contains(msg, "bad data") {
        t.Fatalf("unexpected error message: %q", msg)
    }
}

func TestErrorsIsOnWrappedSentinel(t *testing.T) {
    err := DecodeError(ErrorSourcePrivateKey, ErrEncryptedPrivateKey)
    if !errors.Is(err, ErrEncryptedPrivateKey) {
        t.Fatalf("expected errors.Is to match ErrEncryptedPrivateKey")
    }
}

func TestInvalidPEMTypeMessageSingle(t *testing.T) {
    err := ErrInvalidPEMType("FOO", "CERTIFICATE")
    want := "invalid PEM type: have FOO, expected CERTIFICATE"
    if err.Error() != want {
        t.Fatalf("unexpected error message: got %q, want %q", err.Error(), want)
    }
}

func TestInvalidPEMTypeMessageMultiple(t *testing.T) {
    err := ErrInvalidPEMType("FOO", "CERTIFICATE", "NEW CERTIFICATE REQUEST")
    if !strings.Contains(
        err.Error(),
        "invalid PEM type: have FOO, expected one of CERTIFICATE, NEW CERTIFICATE REQUEST",
    ) {
        t.Fatalf("unexpected error message: %q", err.Error())
    }
}
@@ -4,43 +4,53 @@ import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
)
|
||||
|
||||
// ReadCertificate reads a DER or PEM-encoded certificate from the
|
||||
// byte slice.
|
||||
func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
|
||||
func ReadCertificate(in []byte) (*x509.Certificate, []byte, error) {
|
||||
if len(in) == 0 {
|
||||
err = certerr.ErrEmptyCertificate
|
||||
return
|
||||
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, certerr.ErrEmptyCertificate)
|
||||
}
|
||||
|
||||
if in[0] == '-' {
|
||||
p, remaining := pem.Decode(in)
|
||||
if p == nil {
|
||||
err = errors.New("certlib: invalid PEM file")
|
||||
return
|
||||
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("invalid PEM file"))
|
||||
}
|
||||
|
||||
rest = remaining
|
||||
rest := remaining
|
||||
if p.Type != "CERTIFICATE" {
|
||||
err = certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE")
|
||||
return
|
||||
return nil, rest, certerr.ParsingError(
|
||||
certerr.ErrorSourceCertificate,
|
||||
certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE"),
|
||||
)
|
||||
}
|
||||
|
||||
in = p.Bytes
|
||||
cert, err := x509.ParseCertificate(in)
|
||||
if err != nil {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return cert, rest, nil
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(in)
|
||||
return
|
||||
cert, err := x509.ParseCertificate(in)
|
||||
if err != nil {
|
||||
return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return cert, nil, nil
|
||||
}
|
||||
|
||||
// ReadCertificates tries to read all the certificates in a
|
||||
// PEM-encoded collection.
|
||||
func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
|
||||
func ReadCertificates(in []byte) ([]*x509.Certificate, error) {
|
||||
var cert *x509.Certificate
|
||||
var certs []*x509.Certificate
|
||||
var err error
|
||||
for {
|
||||
cert, in, err = ReadCertificate(in)
|
||||
if err != nil {
|
||||
@@ -64,9 +74,9 @@ func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
|
||||
// the file contains multiple certificates (e.g. a chain), only the
|
||||
// first certificate is returned.
|
||||
func LoadCertificate(path string) (*x509.Certificate, error) {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
in, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
|
||||
cert, _, err := ReadCertificate(in)
|
||||
@@ -76,9 +86,9 @@ func LoadCertificate(path string) (*x509.Certificate, error) {
|
||||
// LoadCertificates tries to read all the certificates in a file,
|
||||
// returning them in the order that it found them in the file.
|
||||
func LoadCertificates(path string) ([]*x509.Certificate, error) {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
in, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
|
||||
return ReadCertificates(in)
|
||||
|
||||
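With the refactor above, LoadCertificate and LoadCertificates surface typed certerr loading and parsing errors instead of bare wrapped errors. A minimal caller sketch; the chain path is hypothetical:

package main

import (
    "errors"
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
    "git.wntrmute.dev/kyle/goutils/certlib/certerr"
)

func main() {
    certs, err := certlib.LoadCertificates("testdata/chain.pem") // hypothetical path
    if err != nil {
        var cerr *certerr.Error
        if errors.As(err, &cerr) {
            log.Fatalf("%s of %s failed: %v", cerr.Kind, cerr.Source, cerr.Err)
        }
        log.Fatal(err)
    }
    fmt.Println("loaded", len(certs), "certificates")
}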
@@ -1,3 +1,4 @@
|
||||
//nolint:testpackage // keep tests in the same package for internal symbol access
|
||||
package certlib
|
||||
|
||||
import (
|
||||
|
||||
@@ -38,6 +38,7 @@ import (
|
||||
"crypto/ed25519"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
@@ -47,29 +48,36 @@ import (
|
||||
// private key. The key must not be in PEM format. If an error is returned, it
|
||||
// may contain information about the private key, so care should be taken when
|
||||
// displaying it directly.
|
||||
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
|
||||
generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = x509.ParseECPrivateKey(keyDER)
|
||||
if err != nil {
|
||||
generalKey, err = ParseEd25519PrivateKey(keyDER)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
|
||||
}
|
||||
}
|
||||
func ParsePrivateKeyDER(keyDER []byte) (crypto.Signer, error) {
|
||||
// Try common encodings in order without deep nesting.
|
||||
if k, err := x509.ParsePKCS8PrivateKey(keyDER); err == nil {
|
||||
switch kk := k.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return kk, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return kk, nil
|
||||
case ed25519.PrivateKey:
|
||||
return kk, nil
|
||||
default:
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
|
||||
}
|
||||
}
|
||||
|
||||
switch generalKey := generalKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return generalKey, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return generalKey, nil
|
||||
case ed25519.PrivateKey:
|
||||
return generalKey, nil
|
||||
default:
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %t", generalKey))
|
||||
if k, err := x509.ParsePKCS1PrivateKey(keyDER); err == nil {
|
||||
return k, nil
|
||||
}
|
||||
if k, err := x509.ParseECPrivateKey(keyDER); err == nil {
|
||||
return k, nil
|
||||
}
|
||||
if k, err := ParseEd25519PrivateKey(keyDER); err == nil {
|
||||
if kk, ok := k.(ed25519.PrivateKey); ok {
|
||||
return kk, nil
|
||||
}
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
|
||||
}
|
||||
// If all parsers failed, return the last error from Ed25519 attempt (approximate cause).
|
||||
if _, err := ParseEd25519PrivateKey(keyDER); err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
|
||||
}
|
||||
// Fallback (should be unreachable)
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, errors.New("unknown key encoding"))
|
||||
}
|
||||
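The rewritten ParsePrivateKeyDER tries PKCS #8 first, then PKCS #1, SEC 1 (EC) and finally Ed25519. A small round-trip sketch, assuming the function is exported from the certlib package as the surrounding files suggest:

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        log.Fatal(err)
    }
    der, err := x509.MarshalPKCS8PrivateKey(priv)
    if err != nil {
        log.Fatal(err)
    }
    // The PKCS #8 branch matches first; PKCS #1, EC and Ed25519 are tried next.
    signer, err := certlib.ParsePrivateKeyDER(der)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("parsed %T\n", signer) // *ecdsa.PrivateKey
}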
|
||||
@@ -65,12 +65,14 @@ func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
|
||||
return nil, errEd25519WrongKeyType
|
||||
}
|
||||
|
||||
const bitsPerByte = 8
|
||||
|
||||
spki := subjectPublicKeyInfo{
|
||||
Algorithm: pkix.AlgorithmIdentifier{
|
||||
Algorithm: ed25519OID,
|
||||
},
|
||||
PublicKey: asn1.BitString{
|
||||
BitLength: len(pub) * 8,
|
||||
BitLength: len(pub) * bitsPerByte,
|
||||
Bytes: pub,
|
||||
},
|
||||
}
|
||||
@@ -91,7 +93,8 @@ func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
|
||||
return nil, errEd25519WrongID
|
||||
}
|
||||
|
||||
if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
|
||||
const bitsPerByte = 8
|
||||
if spki.PublicKey.BitLength != ed25519.PublicKeySize*bitsPerByte {
|
||||
return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
|
||||
}
|
||||
|
||||
|
||||
certlib/fetch.go (new file, 175 lines added)
@@ -0,0 +1,175 @@
package certlib
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/hosts"
|
||||
"git.wntrmute.dev/kyle/goutils/fileutil"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
// FetcherOpts are options for fetching certificates. They are only applicable to ServerFetcher.
|
||||
type FetcherOpts struct {
|
||||
SkipVerify bool
|
||||
Roots *x509.CertPool
|
||||
}
|
||||
|
||||
// Fetcher is an interface for fetching certificates from a remote source. It
|
||||
// currently supports fetching from a server or a file.
|
||||
type Fetcher interface {
|
||||
Get() (*x509.Certificate, error)
|
||||
GetChain() ([]*x509.Certificate, error)
|
||||
String() string
|
||||
}
|
||||
|
||||
type ServerFetcher struct {
|
||||
host string
|
||||
port int
|
||||
insecure bool
|
||||
roots *x509.CertPool
|
||||
}
|
||||
|
||||
// WithRoots sets the roots for the ServerFetcher.
|
||||
func WithRoots(roots *x509.CertPool) func(*ServerFetcher) {
|
||||
return func(sf *ServerFetcher) {
|
||||
sf.roots = roots
|
||||
}
|
||||
}
|
||||
|
||||
// WithSkipVerify sets the insecure flag for the ServerFetcher.
|
||||
func WithSkipVerify() func(*ServerFetcher) {
|
||||
return func(sf *ServerFetcher) {
|
||||
sf.insecure = true
|
||||
}
|
||||
}
|
||||
|
||||
// ParseServer parses a server string into a ServerFetcher. It can be a URL or a
|
||||
// host:port pair.
|
||||
func ParseServer(host string) (*ServerFetcher, error) {
|
||||
target, err := hosts.ParseHost(host)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse server: %w", err)
|
||||
}
|
||||
|
||||
return &ServerFetcher{
|
||||
host: target.Host,
|
||||
port: target.Port,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sf *ServerFetcher) String() string {
|
||||
return fmt.Sprintf("tls://%s", net.JoinHostPort(sf.host, lib.Itoa(sf.port, -1)))
|
||||
}
|
||||
|
||||
func (sf *ServerFetcher) GetChain() ([]*x509.Certificate, error) {
|
||||
config := &tls.Config{
|
||||
InsecureSkipVerify: sf.insecure, // #nosec G402 - verification is skipped only when the caller explicitly requests it
|
||||
RootCAs: sf.roots,
|
||||
}
|
||||
|
||||
dialer := &tls.Dialer{
|
||||
Config: config,
|
||||
}
|
||||
|
||||
hostSpec := net.JoinHostPort(sf.host, lib.Itoa(sf.port, -1))
|
||||
|
||||
netConn, err := dialer.DialContext(context.Background(), "tcp", hostSpec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing server: %w", err)
|
||||
}
|
||||
|
||||
conn, ok := netConn.(*tls.Conn)
|
||||
if !ok {
|
||||
return nil, errors.New("connection is not TLS")
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
state := conn.ConnectionState()
|
||||
return state.PeerCertificates, nil
|
||||
}
|
||||
|
||||
func (sf *ServerFetcher) Get() (*x509.Certificate, error) {
|
||||
certs, err := sf.GetChain()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return certs[0], nil
|
||||
}
|
||||
|
||||
type FileFetcher struct {
|
||||
path string
|
||||
}
|
||||
|
||||
func NewFileFetcher(path string) *FileFetcher {
|
||||
return &FileFetcher{
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
func (ff *FileFetcher) String() string {
|
||||
return ff.path
|
||||
}
|
||||
|
||||
func (ff *FileFetcher) GetChain() ([]*x509.Certificate, error) {
|
||||
if ff.path == "-" {
|
||||
certData, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read from stdin: %w", err)
|
||||
}
|
||||
|
||||
return ParseCertificatesPEM(certData)
|
||||
}
|
||||
|
||||
certs, err := LoadCertificates(ff.path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load chain: %w", err)
|
||||
}
|
||||
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
func (ff *FileFetcher) Get() (*x509.Certificate, error) {
|
||||
certs, err := ff.GetChain()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return certs[0], nil
|
||||
}
|
||||
|
||||
// GetCertificateChain fetches a certificate chain from a remote source.
|
||||
func GetCertificateChain(spec string, opts *FetcherOpts) ([]*x509.Certificate, error) {
|
||||
if fileutil.FileDoesExist(spec) {
|
||||
return NewFileFetcher(spec).GetChain()
|
||||
}
|
||||
|
||||
fetcher, err := ParseServer(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
fetcher.insecure = opts.SkipVerify
|
||||
fetcher.roots = opts.Roots
|
||||
}
|
||||
|
||||
return fetcher.GetChain()
|
||||
}
|
||||
|
||||
// GetCertificate fetches the first certificate from a certificate chain.
|
||||
func GetCertificate(spec string, opts *FetcherOpts) (*x509.Certificate, error) {
|
||||
certs, err := GetCertificateChain(spec, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return certs[0], nil
|
||||
}
|
||||
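GetCertificateChain picks the fetcher for you: an existing file path goes through FileFetcher, anything else is parsed as a server spec and dialed over TLS. A minimal sketch; the host is invented for illustration:

package main

import (
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    // A file path would use the FileFetcher; a host[:port] spec dials TLS.
    chain, err := certlib.GetCertificateChain("example.com:443", &certlib.FetcherOpts{})
    if err != nil {
        log.Fatal(err)
    }
    for _, cert := range chain {
        fmt.Println(cert.Subject.CommonName, cert.NotAfter)
    }
}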
@@ -49,14 +49,14 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
|
||||
|
||||
ct "github.com/google/certificate-transparency-go"
|
||||
cttls "github.com/google/certificate-transparency-go/tls"
|
||||
ctx509 "github.com/google/certificate-transparency-go/x509"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/certerr"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
|
||||
)
|
||||
|
||||
// OneYear is a time.Duration representing a year's worth of seconds.
|
||||
@@ -65,10 +65,10 @@ const OneYear = 8760 * time.Hour
|
||||
// OneDay is a time.Duration representing a day's worth of seconds.
|
||||
const OneDay = 24 * time.Hour
|
||||
|
||||
// DelegationUsage is the OID for the DelegationUseage extensions
|
||||
// DelegationUsage is the OID for the DelegationUsage extensions.
|
||||
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}
|
||||
|
||||
// DelegationExtension
|
||||
// DelegationExtension is a non-critical extension marking delegation usage.
|
||||
var DelegationExtension = pkix.Extension{
|
||||
Id: DelegationUsage,
|
||||
Critical: false,
|
||||
@@ -81,41 +81,51 @@ func InclusiveDate(year int, month time.Month, day int) time.Time {
|
||||
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
|
||||
}
|
||||
|
||||
const (
|
||||
year2012 = 2012
|
||||
year2015 = 2015
|
||||
day1 = 1
|
||||
)
|
||||
|
||||
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 5 years.
|
||||
var Jul2012 = InclusiveDate(2012, time.July, 01)
|
||||
var Jul2012 = InclusiveDate(year2012, time.July, day1)
|
||||
|
||||
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 39 months.
|
||||
var Apr2015 = InclusiveDate(2015, time.April, 01)
|
||||
var Apr2015 = InclusiveDate(year2015, time.April, day1)
|
||||
|
||||
// KeyLength returns the bit size of ECDSA or RSA PublicKey
|
||||
func KeyLength(key interface{}) int {
|
||||
if key == nil {
|
||||
// KeyLength returns the bit size of ECDSA or RSA PublicKey.
|
||||
func KeyLength(key any) int {
|
||||
switch k := key.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
if k == nil {
|
||||
return 0
|
||||
}
|
||||
return k.Curve.Params().BitSize
|
||||
case *rsa.PublicKey:
|
||||
if k == nil {
|
||||
return 0
|
||||
}
|
||||
return k.N.BitLen()
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
|
||||
return ecdsaKey.Curve.Params().BitSize
|
||||
} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
|
||||
return rsaKey.N.BitLen()
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// ExpiryTime returns the time when the certificate chain is expired.
|
||||
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
|
||||
func ExpiryTime(chain []*x509.Certificate) time.Time {
|
||||
var notAfter time.Time
|
||||
if len(chain) == 0 {
|
||||
return
|
||||
return notAfter
|
||||
}
|
||||
|
||||
notAfter = chain[0].NotAfter
|
||||
for _, cert := range chain {
|
||||
if notAfter.After(cert.NotAfter) {
|
||||
notAfter = cert.NotAfter
|
||||
}
|
||||
}
|
||||
return
|
||||
return notAfter
|
||||
}
|
||||
|
||||
// MonthsValid returns the number of months for which a certificate is valid.
|
||||
@@ -144,109 +154,109 @@ func ValidExpiry(c *x509.Certificate) bool {
|
||||
maxMonths = 39
|
||||
case issued.After(Jul2012):
|
||||
maxMonths = 60
|
||||
case issued.Before(Jul2012):
|
||||
default:
|
||||
maxMonths = 120
|
||||
}
|
||||
|
||||
if MonthsValid(c) > maxMonths {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return MonthsValid(c) <= maxMonths
|
||||
}
|
||||
|
||||
// SignatureString returns the TLS signature string corresponding to
|
||||
// an X509 signature algorithm.
|
||||
var signatureString = map[x509.SignatureAlgorithm]string{
|
||||
x509.UnknownSignatureAlgorithm: "Unknown Signature",
|
||||
x509.MD2WithRSA: "MD2WithRSA",
|
||||
x509.MD5WithRSA: "MD5WithRSA",
|
||||
x509.SHA1WithRSA: "SHA1WithRSA",
|
||||
x509.SHA256WithRSA: "SHA256WithRSA",
|
||||
x509.SHA384WithRSA: "SHA384WithRSA",
|
||||
x509.SHA512WithRSA: "SHA512WithRSA",
|
||||
x509.SHA256WithRSAPSS: "SHA256WithRSAPSS",
|
||||
x509.SHA384WithRSAPSS: "SHA384WithRSAPSS",
|
||||
x509.SHA512WithRSAPSS: "SHA512WithRSAPSS",
|
||||
x509.DSAWithSHA1: "DSAWithSHA1",
|
||||
x509.DSAWithSHA256: "DSAWithSHA256",
|
||||
x509.ECDSAWithSHA1: "ECDSAWithSHA1",
|
||||
x509.ECDSAWithSHA256: "ECDSAWithSHA256",
|
||||
x509.ECDSAWithSHA384: "ECDSAWithSHA384",
|
||||
x509.ECDSAWithSHA512: "ECDSAWithSHA512",
|
||||
x509.PureEd25519: "PureEd25519",
|
||||
}
|
||||
|
||||
// SignatureString returns the TLS signature string corresponding to
|
||||
// an X509 signature algorithm.
|
||||
func SignatureString(alg x509.SignatureAlgorithm) string {
|
||||
switch alg {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2WithRSA"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5WithRSA"
|
||||
case x509.SHA1WithRSA:
|
||||
return "SHA1WithRSA"
|
||||
case x509.SHA256WithRSA:
|
||||
return "SHA256WithRSA"
|
||||
case x509.SHA384WithRSA:
|
||||
return "SHA384WithRSA"
|
||||
case x509.SHA512WithRSA:
|
||||
return "SHA512WithRSA"
|
||||
case x509.DSAWithSHA1:
|
||||
return "DSAWithSHA1"
|
||||
case x509.DSAWithSHA256:
|
||||
return "DSAWithSHA256"
|
||||
case x509.ECDSAWithSHA1:
|
||||
return "ECDSAWithSHA1"
|
||||
case x509.ECDSAWithSHA256:
|
||||
return "ECDSAWithSHA256"
|
||||
case x509.ECDSAWithSHA384:
|
||||
return "ECDSAWithSHA384"
|
||||
case x509.ECDSAWithSHA512:
|
||||
return "ECDSAWithSHA512"
|
||||
default:
|
||||
return "Unknown Signature"
|
||||
if s, ok := signatureString[alg]; ok {
|
||||
return s
|
||||
}
|
||||
return "Unknown Signature"
|
||||
}
|
||||
|
||||
// HashAlgoString returns the hash algorithm name contained in the signature
|
||||
// method.
|
||||
var hashAlgoString = map[x509.SignatureAlgorithm]string{
|
||||
x509.UnknownSignatureAlgorithm: "Unknown Hash Algorithm",
|
||||
x509.MD2WithRSA: "MD2",
|
||||
x509.MD5WithRSA: "MD5",
|
||||
x509.SHA1WithRSA: "SHA1",
|
||||
x509.SHA256WithRSA: "SHA256",
|
||||
x509.SHA384WithRSA: "SHA384",
|
||||
x509.SHA512WithRSA: "SHA512",
|
||||
x509.SHA256WithRSAPSS: "SHA256",
|
||||
x509.SHA384WithRSAPSS: "SHA384",
|
||||
x509.SHA512WithRSAPSS: "SHA512",
|
||||
x509.DSAWithSHA1: "SHA1",
|
||||
x509.DSAWithSHA256: "SHA256",
|
||||
x509.ECDSAWithSHA1: "SHA1",
|
||||
x509.ECDSAWithSHA256: "SHA256",
|
||||
x509.ECDSAWithSHA384: "SHA384",
|
||||
x509.ECDSAWithSHA512: "SHA512",
|
||||
x509.PureEd25519: "SHA512", // per x509 docs Ed25519 uses SHA-512 internally
|
||||
}
|
||||
|
||||
// HashAlgoString returns the hash algorithm name contained in the signature
|
||||
// method.
|
||||
func HashAlgoString(alg x509.SignatureAlgorithm) string {
|
||||
switch alg {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5"
|
||||
case x509.SHA1WithRSA:
|
||||
return "SHA1"
|
||||
case x509.SHA256WithRSA:
|
||||
return "SHA256"
|
||||
case x509.SHA384WithRSA:
|
||||
return "SHA384"
|
||||
case x509.SHA512WithRSA:
|
||||
return "SHA512"
|
||||
case x509.DSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.DSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.ECDSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.ECDSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.ECDSAWithSHA384:
|
||||
return "SHA384"
|
||||
case x509.ECDSAWithSHA512:
|
||||
return "SHA512"
|
||||
default:
|
||||
return "Unknown Hash Algorithm"
|
||||
if s, ok := hashAlgoString[alg]; ok {
|
||||
return s
|
||||
}
|
||||
return "Unknown Hash Algorithm"
|
||||
}
|
||||
|
||||
// StringTLSVersion returns underlying enum values from human names for TLS
|
||||
// versions, defaults to current golang default of TLS 1.0
|
||||
// versions, defaults to current golang default of TLS 1.0.
|
||||
func StringTLSVersion(version string) uint16 {
|
||||
switch version {
|
||||
case "1.3":
|
||||
return tls.VersionTLS13
|
||||
case "1.2":
|
||||
return tls.VersionTLS12
|
||||
case "1.1":
|
||||
return tls.VersionTLS11
|
||||
case "1.0":
|
||||
return tls.VersionTLS10
|
||||
default:
|
||||
// Default to Go's historical default of TLS 1.0 for unknown values
|
||||
return tls.VersionTLS10
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
|
||||
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM.
|
||||
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
|
||||
var buffer bytes.Buffer
|
||||
for _, cert := range certs {
|
||||
pem.Encode(&buffer, &pem.Block{
|
||||
if err := pem.Encode(&buffer, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
})
|
||||
}); err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return buffer.Bytes()
|
||||
}
|
||||
|
||||
// EncodeCertificatePEM encodes a single x509 certificates to PEM
|
||||
// EncodeCertificatePEM encodes a single x509 certificates to PEM.
|
||||
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
|
||||
return EncodeCertificatesPEM([]*x509.Certificate{cert})
|
||||
}
|
||||
@@ -269,38 +279,52 @@ func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
|
||||
certs = append(certs, cert...)
|
||||
}
|
||||
if len(certsPEM) > 0 {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("trailing data at end of certificate"))
|
||||
return nil, certerr.DecodeError(
|
||||
certerr.ErrorSourceCertificate,
|
||||
errors.New("trailing data at end of certificate"),
|
||||
)
|
||||
}
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
|
||||
// either PKCS #7, PKCS #12, or raw x509.
|
||||
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
|
||||
func ParseCertificatesDER(certsDER []byte, password string) ([]*x509.Certificate, crypto.Signer, error) {
|
||||
certsDER = bytes.TrimSpace(certsDER)
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
|
||||
if err != nil {
|
||||
var pkcs12data interface{}
|
||||
certs = make([]*x509.Certificate, 1)
|
||||
pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
|
||||
if err != nil {
|
||||
certs, err = x509.ParseCertificates(certsDER)
|
||||
if err != nil {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
} else {
|
||||
key = pkcs12data.(crypto.Signer)
|
||||
}
|
||||
} else {
|
||||
|
||||
// First, try PKCS #7
|
||||
if pkcs7data, err7 := pkcs7.ParsePKCS7(certsDER); err7 == nil {
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("can only extract certificates from signed data content info"))
|
||||
return nil, nil, certerr.DecodeError(
|
||||
certerr.ErrorSourceCertificate,
|
||||
errors.New("can only extract certificates from signed data content info"),
|
||||
)
|
||||
}
|
||||
certs = pkcs7data.Content.SignedData.Certificates
|
||||
certs := pkcs7data.Content.SignedData.Certificates
|
||||
if certs == nil {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
|
||||
}
|
||||
return certs, nil, nil
|
||||
}
|
||||
if certs == nil {
|
||||
return nil, key, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
|
||||
|
||||
// Next, try PKCS #12
|
||||
if pkcs12data, cert, err12 := pkcs12.Decode(certsDER, password); err12 == nil {
|
||||
signer, ok := pkcs12data.(crypto.Signer)
|
||||
if !ok {
|
||||
return nil, nil, certerr.DecodeError(
|
||||
certerr.ErrorSourcePrivateKey,
|
||||
errors.New("PKCS12 data does not contain a private key"),
|
||||
)
|
||||
}
|
||||
return []*x509.Certificate{cert}, signer, nil
|
||||
}
|
||||
return certs, key, nil
|
||||
|
||||
// Finally, attempt to parse raw X.509 certificates
|
||||
certs, err := x509.ParseCertificates(certsDER)
|
||||
if err != nil {
|
||||
return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return certs, nil, nil
|
||||
}
|
||||
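A sketch (not part of the diff) of calling the refactored ParseCertificatesDER, which tries PKCS #7, then PKCS #12, then raw X.509; the input path is a placeholder and the empty password assumes the data is not an encrypted PKCS #12:

package main

import (
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	der, err := os.ReadFile("bundle.p7b") // placeholder path
	if err != nil {
		panic(err)
	}
	certs, key, err := certlib.ParseCertificatesDER(der, "")
	if err != nil {
		panic(err)
	}
	// key is non-nil only for the PKCS #12 path.
	fmt.Printf("parsed %d certificate(s); private key present: %v\n", len(certs), key != nil)
}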
|
||||
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks if it is self-signed.
|
||||
@@ -310,7 +334,8 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
|
||||
err = cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature)
|
||||
if err != nil {
|
||||
return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return cert, nil
|
||||
@@ -320,17 +345,26 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
// can handle PEM encoded PKCS #7 structures.
|
||||
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
certPEM = bytes.TrimSpace(certPEM)
|
||||
cert, rest, err := ParseOneCertificateFromPEM(certPEM)
|
||||
certs, rest, err := ParseOneCertificateFromPEM(certPEM)
|
||||
if err != nil {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
} else if cert == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
|
||||
} else if len(rest) > 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PEM file should contain only one object"))
|
||||
} else if len(cert) > 1 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
|
||||
}
|
||||
return cert[0], nil
|
||||
if certs == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
|
||||
}
|
||||
if len(rest) > 0 {
|
||||
return nil, certerr.ParsingError(
|
||||
certerr.ErrorSourceCertificate,
|
||||
errors.New("the PEM file should contain only one object"),
|
||||
)
|
||||
}
|
||||
if len(certs) > 1 {
|
||||
return nil, certerr.ParsingError(
|
||||
certerr.ErrorSourceCertificate,
|
||||
errors.New("the PKCS7 object in the PEM file should contain only one certificate"),
|
||||
)
|
||||
}
|
||||
return certs[0], nil
|
||||
}
|
||||
|
||||
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
|
||||
@@ -338,7 +372,6 @@ func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
// multiple certificates, from the top of certsPEM, which itself may
|
||||
// contain multiple PEM encoded certificate objects.
|
||||
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
|
||||
|
||||
block, rest := pem.Decode(certsPEM)
|
||||
if block == nil {
|
||||
return nil, rest, nil
|
||||
@@ -346,8 +379,8 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
|
||||
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
|
||||
if err != nil {
|
||||
pkcs7data, err2 := pkcs7.ParsePKCS7(block.Bytes)
|
||||
if err2 != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
@@ -363,10 +396,49 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
|
||||
return certs, rest, nil
|
||||
}
|
||||
|
||||
// LoadFullCertPool returns a certificate pool with roots and intermediates
|
||||
// from disk. If no roots are provided, the system root pool will be used.
|
||||
func LoadFullCertPool(roots, intermediates string) (*x509.CertPool, error) {
|
||||
var err error
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
|
||||
if roots == "" {
|
||||
pool, err = x509.SystemCertPool()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading system cert pool: %w", err)
|
||||
}
|
||||
} else {
|
||||
var rootCerts []*x509.Certificate
|
||||
rootCerts, err = LoadCertificates(roots)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading roots: %w", err)
|
||||
}
|
||||
|
||||
for _, cert := range rootCerts {
|
||||
pool.AddCert(cert)
|
||||
}
|
||||
}
|
||||
|
||||
if intermediates != "" {
|
||||
var intCerts []*x509.Certificate
|
||||
intCerts, err = LoadCertificates(intermediates)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading intermediates: %w", err)
|
||||
}
|
||||
|
||||
for _, cert := range intCerts {
|
||||
pool.AddCert(cert)
|
||||
}
|
||||
}
|
||||
|
||||
return pool, nil
|
||||
}
|
||||
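An illustrative sketch (not in the diff) of LoadFullCertPool; both file paths are placeholders, and an empty roots path would fall back to the system pool as shown above:

package main

import (
	"crypto/x509"
	"fmt"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	pool, err := certlib.LoadFullCertPool("roots.pem", "intermediates.pem") // placeholder paths
	if err != nil {
		panic(err)
	}
	// Hand the pool to x509.VerifyOptions or tls.Config.RootCAs as needed.
	opts := x509.VerifyOptions{Roots: pool}
	fmt.Println(opts.Roots != nil)
}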
|
||||
// LoadPEMCertPool loads a pool of PEM certificates from file.
|
||||
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
||||
if certsFile == "" {
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil // no CA file provided -> treat as no pool and no error
|
||||
}
|
||||
pemCerts, err := os.ReadFile(certsFile)
|
||||
if err != nil {
|
||||
@@ -379,12 +451,12 @@ func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
||||
// PEMToCertPool converts PEM certificates to a CertPool.
|
||||
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
||||
if len(pemCerts) == 0 {
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil // empty input means no pool needed
|
||||
}
|
||||
|
||||
certPool := x509.NewCertPool()
|
||||
if !certPool.AppendCertsFromPEM(pemCerts) {
|
||||
return nil, errors.New("failed to load cert pool")
|
||||
return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, errors.New("failed to load cert pool"))
|
||||
}
|
||||
|
||||
return certPool, nil
|
||||
@@ -393,14 +465,14 @@ func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
|
||||
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
|
||||
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
|
||||
func ParsePrivateKeyPEM(keyPEM []byte) (crypto.Signer, error) {
|
||||
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
|
||||
}
|
||||
|
||||
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
|
||||
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
|
||||
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (crypto.Signer, error) {
|
||||
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -420,44 +492,47 @@ func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if keyDER != nil {
|
||||
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
|
||||
if strings.Contains(procType, "ENCRYPTED") {
|
||||
if password != nil {
|
||||
return x509.DecryptPEMBlock(keyDER, password)
|
||||
}
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
|
||||
}
|
||||
}
|
||||
return keyDER.Bytes, nil
|
||||
if keyDER == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
|
||||
}
|
||||
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
|
||||
if procType, ok := keyDER.Headers["Proc-Type"]; ok && strings.Contains(procType, "ENCRYPTED") {
|
||||
if password != nil {
|
||||
return x509.DecryptPEMBlock(keyDER, password)
|
||||
}
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
|
||||
}
|
||||
return keyDER.Bytes, nil
|
||||
}
|
||||
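A usage sketch (not part of the diff) for the private-key helpers above; the path and password are placeholders, and nil can be passed when the key is unencrypted:

package main

import (
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	pemBytes, err := os.ReadFile("server.key") // placeholder path
	if err != nil {
		panic(err)
	}
	// The password is only consulted when the PEM block is marked ENCRYPTED.
	signer, err := certlib.ParsePrivateKeyPEMWithPassword(pemBytes, []byte("changeit"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded key, public key type %T\n", signer.Public())
}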
|
||||
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
|
||||
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
|
||||
func ParseCSR(in []byte) (*x509.CertificateRequest, []byte, error) {
|
||||
in = bytes.TrimSpace(in)
|
||||
p, rest := pem.Decode(in)
|
||||
if p != nil {
|
||||
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"))
|
||||
if p == nil {
|
||||
csr, err := x509.ParseCertificateRequest(in)
|
||||
if err != nil {
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
|
||||
}
|
||||
|
||||
csr, err = x509.ParseCertificateRequest(p.Bytes)
|
||||
} else {
|
||||
csr, err = x509.ParseCertificateRequest(in)
|
||||
if sigErr := csr.CheckSignature(); sigErr != nil {
|
||||
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
|
||||
}
|
||||
return csr, rest, nil
|
||||
}
|
||||
|
||||
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, rest, certerr.ParsingError(
|
||||
certerr.ErrorSourceCSR,
|
||||
certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"),
|
||||
)
|
||||
}
|
||||
|
||||
csr, err := x509.ParseCertificateRequest(p.Bytes)
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
|
||||
}
|
||||
|
||||
err = csr.CheckSignature()
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
if sigErr := csr.CheckSignature(); sigErr != nil {
|
||||
return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
|
||||
}
|
||||
|
||||
return csr, rest, nil
|
||||
}
|
||||
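A usage sketch (not in the diff) for ParseCSR, which accepts PEM or raw DER and verifies the request signature; the path is a placeholder:

package main

import (
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	in, err := os.ReadFile("request.csr") // placeholder path
	if err != nil {
		panic(err)
	}
	csr, _, err := certlib.ParseCSR(in)
	if err != nil {
		panic(err)
	}
	fmt.Println("CSR subject:", csr.Subject.String())
}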
|
||||
@@ -465,14 +540,14 @@ func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error)
|
||||
// It does not check the signature. This is useful for dumping data from a CSR
|
||||
// locally.
|
||||
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
||||
block, _ := pem.Decode([]byte(csrPEM))
|
||||
block, _ := pem.Decode(csrPEM)
|
||||
if block == nil {
|
||||
return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
|
||||
}
|
||||
csrObject, err := x509.ParseCertificateRequest(block.Bytes)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCSR, err)
|
||||
}
|
||||
|
||||
return csrObject, nil
|
||||
@@ -480,15 +555,20 @@ func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
||||
|
||||
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
|
||||
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
||||
const (
|
||||
rsaBits2048 = 2048
|
||||
rsaBits3072 = 3072
|
||||
rsaBits4096 = 4096
|
||||
)
|
||||
switch pub := priv.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
bitLength := pub.N.BitLen()
|
||||
switch {
|
||||
case bitLength >= 4096:
|
||||
case bitLength >= rsaBits4096:
|
||||
return x509.SHA512WithRSA
|
||||
case bitLength >= 3072:
|
||||
case bitLength >= rsaBits3072:
|
||||
return x509.SHA384WithRSA
|
||||
case bitLength >= 2048:
|
||||
case bitLength >= rsaBits2048:
|
||||
return x509.SHA256WithRSA
|
||||
default:
|
||||
return x509.SHA1WithRSA
|
||||
@@ -509,7 +589,7 @@ func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
||||
}
|
||||
}
|
||||
|
||||
// LoadClientCertificate loads key/certificate from PEM files
// LoadClientCertificate loads key/certificate from PEM files.
|
||||
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
|
||||
if certFile != "" && keyFile != "" {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
@@ -518,10 +598,10 @@ func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, e
|
||||
}
|
||||
return &cert, nil
|
||||
}
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil // absence of client cert is not an error
|
||||
}
|
||||
|
||||
// CreateTLSConfig creates a tls.Config object from certs and roots
|
||||
// CreateTLSConfig creates a tls.Config object from certs and roots.
|
||||
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
|
||||
var certs []tls.Certificate
|
||||
if cert != nil {
|
||||
@@ -530,6 +610,7 @@ func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Confi
|
||||
return &tls.Config{
|
||||
Certificates: certs,
|
||||
RootCAs: remoteCAs,
|
||||
MinVersion: tls.VersionTLS12, // secure default
|
||||
}
|
||||
}
|
||||
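A sketch (not part of the diff) combining LoadClientCertificate, LoadPEMCertPool, and CreateTLSConfig into a client-side tls.Config; all file paths are placeholders:

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	cert, err := certlib.LoadClientCertificate("client.pem", "client.key") // placeholder paths
	if err != nil {
		panic(err)
	}
	pool, err := certlib.LoadPEMCertPool("ca.pem") // placeholder path
	if err != nil {
		panic(err)
	}
	cfg := certlib.CreateTLSConfig(pool, cert)
	fmt.Println(cfg.RootCAs == pool) // MinVersion is set to the secure default inside
}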
|
||||
@@ -554,18 +635,24 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
|
||||
return nil, err
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
||||
return nil, certerr.ParsingError(
|
||||
certerr.ErrorSourceSCTList,
|
||||
errors.New("serialized SCT list contained trailing garbage"),
|
||||
)
|
||||
}
|
||||
|
||||
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
|
||||
for i, serializedSCT := range sctList.SCTList {
|
||||
var sct ct.SignedCertificateTimestamp
|
||||
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
rest2, err2 := cttls.Unmarshal(serializedSCT.Val, &sct)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
||||
if len(rest2) != 0 {
|
||||
return nil, certerr.ParsingError(
|
||||
certerr.ErrorSourceSCTList,
|
||||
errors.New("serialized SCT list contained trailing garbage"),
|
||||
)
|
||||
}
|
||||
list[i] = sct
|
||||
}
|
||||
@@ -577,12 +664,12 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
|
||||
// unmarshalled.
|
||||
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
|
||||
// This loop finds the SCTListExtension in the OCSP response.
|
||||
var SCTListExtension, ext pkix.Extension
|
||||
var sctListExtension, ext pkix.Extension
|
||||
for _, ext = range response.Extensions {
|
||||
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
|
||||
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
|
||||
if ext.Id.Equal(sctExtOid) {
|
||||
SCTListExtension = ext
|
||||
sctListExtension = ext
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -590,10 +677,10 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
|
||||
// This code block extracts the sctList from the SCT extension.
|
||||
var sctList []ct.SignedCertificateTimestamp
|
||||
var err error
|
||||
if numBytes := len(SCTListExtension.Value); numBytes != 0 {
|
||||
if numBytes := len(sctListExtension.Value); numBytes != 0 {
|
||||
var serializedSCTList []byte
|
||||
rest := make([]byte, numBytes)
|
||||
copy(rest, SCTListExtension.Value)
|
||||
copy(rest, sctListExtension.Value)
|
||||
for len(rest) != 0 {
|
||||
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
|
||||
if err != nil {
|
||||
@@ -611,20 +698,16 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
|
||||
// the subsequent file. If no prefix is provided, valFile is assumed to be a
|
||||
// file path.
|
||||
func ReadBytes(valFile string) ([]byte, error) {
|
||||
switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) {
|
||||
case 1:
|
||||
prefix, rest, found := strings.Cut(valFile, ":")
|
||||
if !found {
|
||||
return os.ReadFile(valFile)
|
||||
case 2:
|
||||
switch splitVal[0] {
|
||||
case "env":
|
||||
return []byte(os.Getenv(splitVal[1])), nil
|
||||
case "file":
|
||||
return os.ReadFile(splitVal[1])
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown prefix: %s", splitVal[0])
|
||||
}
|
||||
}
|
||||
switch prefix {
|
||||
case "env":
|
||||
return []byte(os.Getenv(rest)), nil
|
||||
case "file":
|
||||
return os.ReadFile(rest)
|
||||
default:
|
||||
return nil, fmt.Errorf("multiple prefixes: %s",
|
||||
strings.Join(splitVal[:len(splitVal)-1], ", "))
|
||||
return nil, fmt.Errorf("unknown prefix: %s", prefix)
|
||||
}
|
||||
}
|
||||
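A usage sketch (not in the diff) for ReadBytes showing the env: and file: prefixes handled above; the variable and file names are placeholders:

package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	fromEnv, err := certlib.ReadBytes("env:API_TOKEN") // placeholder variable name
	if err != nil {
		panic(err)
	}
	fromFile, err := certlib.ReadBytes("file:token.txt") // same as the bare path "token.txt"
	if err != nil {
		panic(err)
	}
	fmt.Println(len(fromEnv), len(fromFile))
}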
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const defaultHTTPSPort = 443
|
||||
|
||||
type Target struct {
|
||||
Host string
|
||||
Port int
|
||||
@@ -24,45 +26,50 @@ func parseURL(host string) (string, int, error) {
|
||||
return "", 0, fmt.Errorf("certlib/hosts: invalid host: %s", host)
|
||||
}
|
||||
|
||||
if strings.ToLower(url.Scheme) != "https" {
|
||||
switch strings.ToLower(url.Scheme) {
|
||||
case "https":
|
||||
// OK
|
||||
case "tls":
|
||||
// OK
|
||||
default:
|
||||
return "", 0, errors.New("certlib/hosts: only https scheme supported")
|
||||
}
|
||||
|
||||
if url.Port() == "" {
|
||||
return url.Hostname(), 443, nil
|
||||
return url.Hostname(), defaultHTTPSPort, nil
|
||||
}
|
||||
|
||||
port, err := strconv.ParseInt(url.Port(), 10, 16)
|
||||
if err != nil {
|
||||
portInt, err2 := strconv.ParseInt(url.Port(), 10, 16)
|
||||
if err2 != nil {
|
||||
return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", url.Port())
|
||||
}
|
||||
|
||||
return url.Hostname(), int(port), nil
|
||||
return url.Hostname(), int(portInt), nil
|
||||
}
|
||||
|
||||
func parseHostPort(host string) (string, int, error) {
|
||||
host, sport, err := net.SplitHostPort(host)
|
||||
shost, sport, err := net.SplitHostPort(host)
|
||||
if err == nil {
|
||||
port, err := strconv.ParseInt(sport, 10, 16)
|
||||
if err != nil {
|
||||
portInt, err2 := strconv.ParseInt(sport, 10, 16)
|
||||
if err2 != nil {
|
||||
return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", sport)
|
||||
}
|
||||
|
||||
return host, int(port), nil
|
||||
return shost, int(portInt), nil
|
||||
}
|
||||
|
||||
return host, 443, nil
|
||||
return host, defaultHTTPSPort, nil
|
||||
}
|
||||
|
||||
func ParseHost(host string) (*Target, error) {
|
||||
host, port, err := parseURL(host)
|
||||
uhost, port, err := parseURL(host)
|
||||
if err == nil {
|
||||
return &Target{Host: host, Port: port}, nil
|
||||
return &Target{Host: uhost, Port: port}, nil
|
||||
}
|
||||
|
||||
host, port, err = parseHostPort(host)
|
||||
shost, port, err := parseHostPort(host)
|
||||
if err == nil {
|
||||
return &Target{Host: host, Port: port}, nil
|
||||
return &Target{Host: shost, Port: port}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("certlib/hosts: invalid host: %s", host)
|
||||
|
||||
35
certlib/hosts/hosts_test.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package hosts_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/hosts"
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
Host string
|
||||
Target hosts.Target
|
||||
}
|
||||
|
||||
var testCases = []testCase{
|
||||
{Host: "server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
|
||||
{Host: "server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
|
||||
{Host: "tls://server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
|
||||
{Host: "https://server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
|
||||
{Host: "https://server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
|
||||
{Host: "tls://server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
|
||||
{Host: "https://server-name/something/else", Target: hosts.Target{Host: "server-name", Port: 443}},
|
||||
}
|
||||
|
||||
func TestParseHost(t *testing.T) {
|
||||
for i, tc := range testCases {
|
||||
target, err := hosts.ParseHost(tc.Host)
|
||||
if err != nil {
|
||||
t.Fatalf("test case %d: %s", i+1, err)
|
||||
}
|
||||
|
||||
if target.Host != tc.Target.Host {
|
||||
t.Fatalf("test case %d: got host '%s', want host '%s'", i+1, target.Host, tc.Target.Host)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -93,7 +93,7 @@ type signedData struct {
|
||||
Version int
|
||||
DigestAlgorithms asn1.RawValue
|
||||
ContentInfo asn1.RawValue
|
||||
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
|
||||
Certificates asn1.RawValue `asn1:"optional"`
|
||||
Crls asn1.RawValue `asn1:"optional"`
|
||||
SignerInfos asn1.RawValue
|
||||
}
|
||||
@@ -158,9 +158,9 @@ type EncryptedContentInfo struct {
|
||||
EncryptedContent []byte `asn1:"tag:0,optional"`
|
||||
}
|
||||
|
||||
func unmarshalInit(raw []byte) (init initPKCS7, err error) {
|
||||
_, err = asn1.Unmarshal(raw, &init)
|
||||
if err != nil {
|
||||
func unmarshalInit(raw []byte) (initPKCS7, error) {
|
||||
var init initPKCS7
|
||||
if _, err := asn1.Unmarshal(raw, &init); err != nil {
|
||||
return initPKCS7{}, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
return init, nil
|
||||
@@ -207,7 +207,10 @@ func populateEncryptedData(msg *PKCS7, contentBytes []byte) error {
|
||||
return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
|
||||
}
|
||||
if ed.Version != 0 {
|
||||
return certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 encryptedData version 0 is supported"))
|
||||
return certerr.ParsingError(
|
||||
certerr.ErrorSourceCertificate,
|
||||
errors.New("only PKCS #7 encryptedData version 0 is supported"),
|
||||
)
|
||||
}
|
||||
msg.Content.EncryptedData = ed
|
||||
return nil
|
||||
@@ -215,34 +218,35 @@ func populateEncryptedData(msg *PKCS7, contentBytes []byte) error {
|
||||
|
||||
// ParsePKCS7 attempts to parse the DER encoded bytes of a
|
||||
// PKCS7 structure.
|
||||
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
|
||||
|
||||
func ParsePKCS7(raw []byte) (*PKCS7, error) {
|
||||
pkcs7, err := unmarshalInit(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
msg = new(PKCS7)
|
||||
msg := new(PKCS7)
|
||||
msg.Raw = pkcs7.Raw
|
||||
msg.ContentInfo = pkcs7.ContentType.String()
|
||||
|
||||
switch msg.ContentInfo {
|
||||
case ObjIDData:
|
||||
if err := populateData(msg, pkcs7.Content); err != nil {
|
||||
return nil, err
|
||||
if e := populateData(msg, pkcs7.Content); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
case ObjIDSignedData:
|
||||
if err := populateSignedData(msg, pkcs7.Content.Bytes); err != nil {
|
||||
return nil, err
|
||||
if e := populateSignedData(msg, pkcs7.Content.Bytes); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
case ObjIDEncryptedData:
|
||||
if err := populateEncryptedData(msg, pkcs7.Content.Bytes); err != nil {
|
||||
return nil, err
|
||||
if e := populateEncryptedData(msg, pkcs7.Content.Bytes); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
default:
|
||||
return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"))
|
||||
return nil, certerr.ParsingError(
|
||||
certerr.ErrorSourceCertificate,
|
||||
errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"),
|
||||
)
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ package revoke
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
@@ -89,35 +90,35 @@ func ldapURL(url string) bool {
|
||||
// - false, false: an error was encountered while checking revocations.
|
||||
// - false, true: the certificate was checked successfully, and it is not revoked.
|
||||
// - true, true: the certificate was checked successfully, and it is revoked.
|
||||
// - true, false: failure to check revocation status causes verification to fail
|
||||
func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
// - true, false: failure to check revocation status causes verification to fail.
|
||||
func revCheck(cert *x509.Certificate) (bool, bool, error) {
|
||||
for _, url := range cert.CRLDistributionPoints {
|
||||
if ldapURL(url) {
|
||||
log.Infof("skipping LDAP CRL: %s", url)
|
||||
continue
|
||||
}
|
||||
|
||||
if revoked, ok, err := certIsRevokedCRL(cert, url); !ok {
|
||||
if rvk, ok2, err2 := certIsRevokedCRL(cert, url); !ok2 {
|
||||
log.Warning("error checking revocation via CRL")
|
||||
if HardFail {
|
||||
return true, false, err
|
||||
return true, false, err2
|
||||
}
|
||||
return false, false, err
|
||||
} else if revoked {
|
||||
return false, false, err2
|
||||
} else if rvk {
|
||||
log.Info("certificate is revoked via CRL")
|
||||
return true, true, err
|
||||
return true, true, err2
|
||||
}
|
||||
}
|
||||
|
||||
if revoked, ok, err := certIsRevokedOCSP(cert, HardFail); !ok {
|
||||
if rvk, ok2, err2 := certIsRevokedOCSP(cert, HardFail); !ok2 {
|
||||
log.Warning("error checking revocation via OCSP")
|
||||
if HardFail {
|
||||
return true, false, err
|
||||
return true, false, err2
|
||||
}
|
||||
return false, false, err
|
||||
} else if revoked {
|
||||
return false, false, err2
|
||||
} else if rvk {
|
||||
log.Info("certificate is revoked via OCSP")
|
||||
return true, true, err
|
||||
return true, true, err2
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
@@ -125,13 +126,17 @@ func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
|
||||
// fetchCRL fetches and parses a CRL.
|
||||
func fetchCRL(url string) (*x509.RevocationList, error) {
|
||||
resp, err := HTTPClient.Get(url)
|
||||
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 300 {
|
||||
if resp.StatusCode >= http.StatusMultipleChoices {
|
||||
return nil, errors.New("failed to retrieve CRL")
|
||||
}
|
||||
|
||||
@@ -154,12 +159,11 @@ func getIssuer(cert *x509.Certificate) *x509.Certificate {
|
||||
}
|
||||
|
||||
return issuer
|
||||
|
||||
}
|
||||
|
||||
// check a cert against a specific CRL. Returns the same bool pair
|
||||
// as revCheck, plus an error if one occurred.
|
||||
func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err error) {
|
||||
func certIsRevokedCRL(cert *x509.Certificate, url string) (bool, bool, error) {
|
||||
crlLock.Lock()
|
||||
crl, ok := CRLSet[url]
|
||||
if ok && crl == nil {
|
||||
@@ -187,10 +191,9 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
|
||||
|
||||
// check CRL signature
|
||||
if issuer != nil {
|
||||
err = crl.CheckSignatureFrom(issuer)
|
||||
if err != nil {
|
||||
log.Warningf("failed to verify CRL: %v", err)
|
||||
return false, false, err
|
||||
if sigErr := crl.CheckSignatureFrom(issuer); sigErr != nil {
|
||||
log.Warningf("failed to verify CRL: %v", sigErr)
|
||||
return false, false, sigErr
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,40 +202,44 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
|
||||
crlLock.Unlock()
|
||||
}
|
||||
|
||||
for _, revoked := range crl.RevokedCertificates {
|
||||
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
|
||||
for _, entry := range crl.RevokedCertificateEntries {
|
||||
if cert.SerialNumber.Cmp(entry.SerialNumber) == 0 {
|
||||
log.Info("Serial number match: intermediate is revoked.")
|
||||
return true, true, err
|
||||
return true, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, true, err
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
// VerifyCertificate ensures that the certificate passed in hasn't
|
||||
// expired and checks the CRL for the server.
|
||||
func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
|
||||
revoked, ok, _ = VerifyCertificateError(cert)
|
||||
func VerifyCertificate(cert *x509.Certificate) (bool, bool) {
|
||||
revoked, ok, _ := VerifyCertificateError(cert)
|
||||
return revoked, ok
|
||||
}
|
||||
|
||||
// VerifyCertificateError ensures that the certificate passed in hasn't
|
||||
// expired and checks the CRL for the server.
|
||||
func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error) {
|
||||
if !time.Now().Before(cert.NotAfter) {
|
||||
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
|
||||
log.Info(msg)
|
||||
return true, true, errors.New(msg)
|
||||
} else if !time.Now().After(cert.NotBefore) {
|
||||
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
|
||||
log.Info(msg)
|
||||
return true, true, errors.New(msg)
|
||||
}
|
||||
return revCheck(cert)
|
||||
func VerifyCertificateError(cert *x509.Certificate) (bool, bool, error) {
|
||||
if !time.Now().Before(cert.NotAfter) {
|
||||
msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
|
||||
log.Info(msg)
|
||||
return true, true, errors.New(msg)
|
||||
} else if !time.Now().After(cert.NotBefore) {
|
||||
msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
|
||||
log.Info(msg)
|
||||
return true, true, errors.New(msg)
|
||||
}
|
||||
return revCheck(cert)
|
||||
}
|
||||
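An end-to-end sketch (not part of the diff) of the revocation helpers; the leaf path is a placeholder, and the revoke import path is assumed to sit under certlib alongside the hosts package:

package main

import (
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib"
	"git.wntrmute.dev/kyle/goutils/certlib/revoke"
)

func main() {
	data, err := os.ReadFile("leaf.pem") // placeholder path
	if err != nil {
		panic(err)
	}
	cert, err := certlib.ParseCertificatePEM(data)
	if err != nil {
		panic(err)
	}
	// revoked reports CRL/OCSP status; ok reports whether the check completed.
	revoked, ok := revoke.VerifyCertificate(cert)
	fmt.Printf("revoked=%v checked=%v\n", revoked, ok)
}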
|
||||
func fetchRemote(url string) (*x509.Certificate, error) {
|
||||
resp, err := HTTPClient.Get(url)
|
||||
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -255,8 +262,12 @@ var ocspOpts = ocsp.RequestOptions{
|
||||
Hash: crypto.SHA1,
|
||||
}
|
||||
|
||||
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e error) {
|
||||
var err error
|
||||
const ocspGetURLMaxLen = 256
|
||||
|
||||
func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (bool, bool, error) {
|
||||
var revoked bool
|
||||
var ok bool
|
||||
var lastErr error
|
||||
|
||||
ocspURLs := leaf.OCSPServer
|
||||
if len(ocspURLs) == 0 {
|
||||
@@ -272,15 +283,16 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
||||
|
||||
ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
|
||||
if err != nil {
|
||||
return revoked, ok, err
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
for _, server := range ocspURLs {
|
||||
resp, err := sendOCSPRequest(server, ocspRequest, leaf, issuer)
|
||||
if err != nil {
|
||||
resp, e := sendOCSPRequest(server, ocspRequest, leaf, issuer)
|
||||
if e != nil {
|
||||
if strict {
|
||||
return revoked, ok, err
|
||||
return false, false, e
|
||||
}
|
||||
lastErr = e
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -292,9 +304,9 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
||||
revoked = true
|
||||
}
|
||||
|
||||
return revoked, ok, err
|
||||
return revoked, ok, nil
|
||||
}
|
||||
return revoked, ok, err
|
||||
return revoked, ok, lastErr
|
||||
}
|
||||
|
||||
// sendOCSPRequest attempts to request an OCSP response from the
|
||||
@@ -303,12 +315,21 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
|
||||
func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
|
||||
var resp *http.Response
|
||||
var err error
|
||||
if len(req) > 256 {
|
||||
if len(req) > ocspGetURLMaxLen {
|
||||
buf := bytes.NewBuffer(req)
|
||||
resp, err = HTTPClient.Post(server, "application/ocsp-request", buf)
|
||||
httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodPost, server, buf)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
httpReq.Header.Set("Content-Type", "application/ocsp-request")
|
||||
resp, err = HTTPClient.Do(httpReq)
|
||||
} else {
|
||||
reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
|
||||
resp, err = HTTPClient.Get(reqURL)
|
||||
httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodGet, reqURL, nil)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
resp, err = HTTPClient.Do(httpReq)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -343,21 +364,21 @@ func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate)
|
||||
|
||||
var crlRead = io.ReadAll
|
||||
|
||||
// SetCRLFetcher sets the function to use to read from the http response body
|
||||
// SetCRLFetcher sets the function to use to read from the http response body.
|
||||
func SetCRLFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
crlRead = fn
|
||||
}
|
||||
|
||||
var remoteRead = io.ReadAll
|
||||
|
||||
// SetRemoteFetcher sets the function to use to read from the http response body
|
||||
// SetRemoteFetcher sets the function to use to read from the http response body.
|
||||
func SetRemoteFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
remoteRead = fn
|
||||
}
|
||||
|
||||
var ocspRead = io.ReadAll
|
||||
|
||||
// SetOCSPFetcher sets the function to use to read from the http response body
|
||||
// SetOCSPFetcher sets the function to use to read from the http response body.
|
||||
func SetOCSPFetcher(fn func(io.Reader) ([]byte, error)) {
|
||||
ocspRead = fn
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//nolint:testpackage // keep tests in the same package for internal symbol access
|
||||
package revoke
|
||||
|
||||
import (
|
||||
@@ -50,7 +51,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// to indicate that this is the case.
|
||||
|
||||
// 2014/05/22 14:18:17 Certificate expired 2014-04-04 14:14:20 +0000 UTC
|
||||
// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt
|
||||
// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt.
|
||||
var expiredCert = mustParse(`-----BEGIN CERTIFICATE-----
|
||||
MIIEXTCCA8agAwIBAgIEBycURTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV
|
||||
UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
|
||||
@@ -80,7 +81,7 @@ sESPRwHkcMUNdAp37FLweUw=
|
||||
|
||||
// 2014/05/22 14:18:31 Serial number match: intermediate is revoked.
|
||||
// 2014/05/22 14:18:31 certificate is revoked via CRL
|
||||
// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt
|
||||
// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt.
|
||||
var revokedCert = mustParse(`-----BEGIN CERTIFICATE-----
|
||||
MIIEEzCCAvugAwIBAgILBAAAAAABGMGjftYwDQYJKoZIhvcNAQEFBQAwcTEoMCYG
|
||||
A1UEAxMfR2xvYmFsU2lnbiBSb290U2lnbiBQYXJ0bmVycyBDQTEdMBsGA1UECxMU
|
||||
@@ -106,7 +107,7 @@ Kz5vh+5tmytUPKA8hUgmLWe94lMb7Uqq2wgZKsqun5DAWleKu81w7wEcOrjiiB+x
|
||||
jeBHq7OnpWm+ccTOPCE6H4ZN4wWVS7biEBUdop/8HgXBPQHWAdjL
|
||||
-----END CERTIFICATE-----`)
|
||||
|
||||
// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url
|
||||
// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url.
|
||||
var goodComodoCA = (`-----BEGIN CERTIFICATE-----
|
||||
MIIGCDCCA/CgAwIBAgIQKy5u6tl1NmwUim7bo3yMBzANBgkqhkiG9w0BAQwFADCB
|
||||
hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
|
||||
@@ -153,7 +154,7 @@ func mustParse(pemData string) *x509.Certificate {
|
||||
panic("Invalid PEM type.")
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate([]byte(block.Bytes))
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
@@ -182,7 +183,6 @@ func TestGood(t *testing.T) {
|
||||
} else if revoked {
|
||||
t.Fatalf("good certificate should not have been marked as revoked")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestLdap(t *testing.T) {
|
||||
@@ -230,7 +230,6 @@ func TestBadCRLSet(t *testing.T) {
|
||||
t.Fatalf("key emptystring should be deleted from CRLSet")
|
||||
}
|
||||
delete(CRLSet, "")
|
||||
|
||||
}
|
||||
|
||||
func TestCachedCRLSet(t *testing.T) {
|
||||
@@ -241,13 +240,11 @@ func TestCachedCRLSet(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoteFetchError(t *testing.T) {
|
||||
|
||||
badurl := ":"
|
||||
|
||||
if _, err := fetchRemote(badurl); err == nil {
|
||||
t.Fatalf("fetching bad url should result in non-nil error")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestNoOCSPServers(t *testing.T) {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
@@ -28,10 +29,16 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
|
||||
|
||||
if verbose {
|
||||
fmt.Printf("connecting to %s/%s... ", addr, proto)
|
||||
os.Stdout.Sync()
|
||||
if err = os.Stdout.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
conn, err := net.DialTimeout(proto, addr, timeout)
|
||||
dialer := &net.Dialer{
|
||||
Timeout: timeout,
|
||||
}
|
||||
|
||||
conn, err := dialer.DialContext(context.Background(), proto, addr)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
fmt.Println("failed.")
|
||||
@@ -42,8 +49,8 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
|
||||
if verbose {
|
||||
fmt.Println("OK")
|
||||
}
|
||||
conn.Close()
|
||||
return nil
|
||||
|
||||
return conn.Close()
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"crypto/x509"
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -14,22 +15,22 @@ import (
|
||||
// loadCertsFromFile attempts to parse certificates from a file that may be in
|
||||
// PEM or DER/PKCS#7 format. Returns the parsed certificates or an error.
|
||||
func loadCertsFromFile(path string) ([]*x509.Certificate, error) {
|
||||
var certs []*x509.Certificate
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try PEM first
|
||||
if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
|
||||
if certs, err = certlib.ParseCertificatesPEM(data); err == nil {
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
// Try DER/PKCS7/PKCS12 (with no password)
|
||||
if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
|
||||
if certs, _, err = certlib.ParseCertificatesDER(data, ""); err == nil {
|
||||
return certs, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func makePoolFromFile(path string) (*x509.CertPool, error) {
|
||||
@@ -56,49 +57,50 @@ var embeddedTestdata embed.FS
|
||||
// loadCertsFromBytes attempts to parse certificates from bytes that may be in
|
||||
// PEM or DER/PKCS#7 format.
|
||||
func loadCertsFromBytes(data []byte) ([]*x509.Certificate, error) {
|
||||
// Try PEM first
|
||||
if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
|
||||
certs, err := certlib.ParseCertificatesPEM(data)
|
||||
if err == nil {
|
||||
return certs, nil
|
||||
}
|
||||
// Try DER/PKCS7/PKCS12 (with no password)
|
||||
if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
|
||||
|
||||
certs, _, err = certlib.ParseCertificatesDER(data, "")
|
||||
if err == nil {
|
||||
return certs, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func makePoolFromBytes(data []byte) (*x509.CertPool, error) {
|
||||
certs, err := loadCertsFromBytes(data)
|
||||
if err != nil || len(certs) == 0 {
|
||||
return nil, fmt.Errorf("failed to load CA certificates from embedded bytes")
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
for _, c := range certs {
|
||||
pool.AddCert(c)
|
||||
}
|
||||
return pool, nil
|
||||
certs, err := loadCertsFromBytes(data)
|
||||
if err != nil || len(certs) == 0 {
|
||||
return nil, errors.New("failed to load CA certificates from embedded bytes")
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
for _, c := range certs {
|
||||
pool.AddCert(c)
|
||||
}
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
// isSelfSigned returns true if the given certificate is self-signed.
|
||||
// It checks that the subject and issuer match and that the certificate's
|
||||
// signature verifies against its own public key.
|
||||
func isSelfSigned(cert *x509.Certificate) bool {
|
||||
if cert == nil {
|
||||
return false
|
||||
}
|
||||
// Quick check: subject and issuer match
|
||||
if cert.Subject.String() != cert.Issuer.String() {
|
||||
return false
|
||||
}
|
||||
// Cryptographic check: the certificate is signed by itself
|
||||
if err := cert.CheckSignatureFrom(cert); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
if cert == nil {
|
||||
return false
|
||||
}
|
||||
// Quick check: subject and issuer match
|
||||
if cert.Subject.String() != cert.Issuer.String() {
|
||||
return false
|
||||
}
|
||||
// Cryptographic check: the certificate is signed by itself
|
||||
if err := cert.CheckSignatureFrom(cert); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string) {
|
||||
func verifyAgainstCA(caPool *x509.CertPool, path string) (bool, string) {
|
||||
certs, err := loadCertsFromFile(path)
|
||||
if err != nil || len(certs) == 0 {
|
||||
return false, ""
|
||||
@@ -117,14 +119,14 @@ func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string
|
||||
Intermediates: ints,
|
||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
||||
}
|
||||
if _, err := leaf.Verify(opts); err != nil {
|
||||
if _, err = leaf.Verify(opts); err != nil {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
return true, leaf.NotAfter.Format("2006-01-02")
|
||||
}
|
||||
|
||||
func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expiry string) {
|
||||
func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (bool, string) {
|
||||
certs, err := loadCertsFromBytes(certData)
|
||||
if err != nil || len(certs) == 0 {
|
||||
return false, ""
|
||||
@@ -143,92 +145,159 @@ func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expi
|
||||
Intermediates: ints,
|
||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
||||
}
|
||||
if _, err := leaf.Verify(opts); err != nil {
|
||||
if _, err = leaf.Verify(opts); err != nil {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
return true, leaf.NotAfter.Format("2006-01-02")
|
||||
}
|
||||
|
||||
// selftest runs built-in validation using embedded certificates.
|
||||
func selftest() int {
|
||||
type testCase struct {
|
||||
name string
|
||||
caFile string
|
||||
certFile string
|
||||
expectOK bool
|
||||
type testCase struct {
|
||||
name string
|
||||
caFile string
|
||||
certFile string
|
||||
expectOK bool
|
||||
}
|
||||
|
||||
func (tc testCase) Run() error {
|
||||
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.caFile, err)
|
||||
}
|
||||
|
||||
cases := []testCase{
|
||||
{name: "ISRG Root X1 validates LE E7", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/le-e7.pem", expectOK: true},
|
||||
{name: "ISRG Root X1 does NOT validate Google WR2", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/goog-wr2.pem", expectOK: false},
|
||||
{name: "GTS R1 validates Google WR2", caFile: "testdata/gts-r1.pem", certFile: "testdata/goog-wr2.pem", expectOK: true},
|
||||
{name: "GTS R1 does NOT validate LE E7", caFile: "testdata/gts-r1.pem", certFile: "testdata/le-e7.pem", expectOK: false},
|
||||
}
|
||||
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.certFile, err)
|
||||
}
|
||||
|
||||
failures := 0
|
||||
for _, tc := range cases {
|
||||
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
|
||||
pool, err := makePoolFromBytes(caBytes)
|
||||
if err != nil || pool == nil {
|
||||
return fmt.Errorf("selftest: failed to build CA pool for %s: %w", tc.caFile, err)
|
||||
}
|
||||
|
||||
ok, exp := verifyAgainstCABytes(pool, certBytes)
|
||||
if ok != tc.expectOK {
|
||||
return fmt.Errorf("%s: unexpected result: got %v, want %v", tc.name, ok, tc.expectOK)
|
||||
}
|
||||
|
||||
if ok {
|
||||
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
|
||||
}
|
||||
|
||||
fmt.Printf("%s: INVALID (as expected)\n", tc.name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var cases = []testCase{
|
||||
{
|
||||
name: "ISRG Root X1 validates LE E7",
|
||||
caFile: "testdata/isrg-root-x1.pem",
|
||||
certFile: "testdata/le-e7.pem",
|
||||
expectOK: true,
|
||||
},
|
||||
{
|
||||
name: "ISRG Root X1 does NOT validate Google WR2",
|
||||
caFile: "testdata/isrg-root-x1.pem",
|
||||
certFile: "testdata/goog-wr2.pem",
|
||||
expectOK: false,
|
||||
},
|
||||
{
|
||||
name: "GTS R1 validates Google WR2",
|
||||
caFile: "testdata/gts-r1.pem",
|
||||
certFile: "testdata/goog-wr2.pem",
|
||||
expectOK: true,
|
||||
},
|
||||
{
|
||||
name: "GTS R1 does NOT validate LE E7",
|
||||
caFile: "testdata/gts-r1.pem",
|
||||
certFile: "testdata/le-e7.pem",
|
||||
expectOK: false,
|
||||
},
|
||||
}
|
||||
|
||||
// selftest runs built-in validation using embedded certificates.
|
||||
func selftest() int {
|
||||
failures := 0
|
||||
for _, tc := range cases {
|
||||
err := tc.Run()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.caFile, err)
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
|
||||
}
|
||||
|
||||
// Verify that both embedded root CAs are detected as self-signed
|
||||
roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
|
||||
for _, root := range roots {
|
||||
b, err := embeddedTestdata.ReadFile(root)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.certFile, err)
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
pool, err := makePoolFromBytes(caBytes)
|
||||
if err != nil || pool == nil {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to build CA pool for %s: %v\n", tc.caFile, err)
|
||||
certs, err := loadCertsFromBytes(b)
|
||||
if err != nil || len(certs) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
ok, exp := verifyAgainstCABytes(pool, certBytes)
|
||||
if ok != tc.expectOK {
|
||||
fmt.Printf("%s: unexpected result: got %v, want %v\n", tc.name, ok, tc.expectOK)
|
||||
failures++
|
||||
leaf := certs[0]
|
||||
if isSelfSigned(leaf) {
|
||||
fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
|
||||
} else {
|
||||
if ok {
|
||||
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
|
||||
} else {
|
||||
fmt.Printf("%s: INVALID (as expected)\n", tc.name)
|
||||
}
|
||||
fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
|
||||
failures++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that both embedded root CAs are detected as self-signed
|
||||
roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
|
||||
for _, root := range roots {
|
||||
b, err := embeddedTestdata.ReadFile(root)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
certs, err := loadCertsFromBytes(b)
|
||||
if err != nil || len(certs) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
|
||||
failures++
|
||||
continue
|
||||
}
|
||||
leaf := certs[0]
|
||||
if isSelfSigned(leaf) {
|
||||
fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
|
||||
} else {
|
||||
fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
|
||||
failures++
|
||||
}
|
||||
}
|
||||
if failures == 0 {
|
||||
fmt.Println("selftest: PASS")
|
||||
return 0
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
|
||||
return 1
|
||||
}
|
||||
|
||||
if failures == 0 {
|
||||
fmt.Println("selftest: PASS")
|
||||
return 0
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
|
||||
return 1
|
||||
// expiryString returns a YYYY-MM-DD date string to display for certificate
|
||||
// expiry. If an explicit exp string is provided, it is used. Otherwise, if a
|
||||
// leaf certificate is available, its NotAfter is formatted. As a last resort,
|
||||
// it falls back to today's date (should not normally happen).
|
||||
func expiryString(leaf *x509.Certificate, exp string) string {
|
||||
if exp != "" {
|
||||
return exp
|
||||
}
|
||||
if leaf != nil {
|
||||
return leaf.NotAfter.Format("2006-01-02")
|
||||
}
|
||||
return time.Now().Format("2006-01-02")
|
||||
}
|
||||
|
||||
// processCert verifies a single certificate file against the provided CA pool
|
||||
// and prints the result in the required format, handling self-signed
|
||||
// certificates specially.
|
||||
func processCert(caPool *x509.CertPool, certPath string) {
|
||||
ok, exp := verifyAgainstCA(caPool, certPath)
|
||||
name := filepath.Base(certPath)
|
||||
|
||||
// Try to load the leaf cert for self-signed detection and expiry fallback
|
||||
var leaf *x509.Certificate
|
||||
if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
|
||||
leaf = certs[0]
|
||||
}
|
||||
|
||||
// Prefer the SELF-SIGNED label if applicable
|
||||
if isSelfSigned(leaf) {
|
||||
fmt.Printf("%s: SELF-SIGNED\n", name)
|
||||
return
|
||||
}
|
||||
|
||||
if ok {
|
||||
fmt.Printf("%s: OK (expires %s)\n", name, expiryString(leaf, exp))
|
||||
return
|
||||
}
|
||||
fmt.Printf("%s: INVALID\n", name)
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -250,38 +319,7 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, certPath := range os.Args[2:] {
|
||||
ok, exp := verifyAgainstCA(caPool, certPath)
|
||||
name := filepath.Base(certPath)
|
||||
// Load the leaf once for self-signed detection and potential expiry fallback
|
||||
var leaf *x509.Certificate
|
||||
if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
|
||||
leaf = certs[0]
|
||||
}
|
||||
|
||||
// If the certificate is self-signed, prefer the SELF-SIGNED label
|
||||
if isSelfSigned(leaf) {
|
||||
fmt.Printf("%s: SELF-SIGNED\n", name)
|
||||
continue
|
||||
}
|
||||
|
||||
if ok {
|
||||
// Display with the requested format
|
||||
// Example: file: OK (expires 2031-01-01)
|
||||
// Ensure deterministic date formatting
|
||||
// Note: no timezone displayed; date only as per example
|
||||
// If exp ended up empty for some reason, recompute safely
|
||||
if exp == "" {
|
||||
if leaf != nil {
|
||||
exp = leaf.NotAfter.Format("2006-01-02")
|
||||
} else {
|
||||
// fallback to the current date to avoid empty; though shouldn't happen
|
||||
exp = time.Now().Format("2006-01-02")
|
||||
}
|
||||
}
|
||||
fmt.Printf("%s: OK (expires %s)\n", name, exp)
|
||||
} else {
|
||||
fmt.Printf("%s: INVALID\n", name)
|
||||
}
|
||||
}
|
||||
for _, certPath := range os.Args[2:] {
|
||||
processCert(caPool, certPath)
|
||||
}
|
||||
}
|
||||
|
||||
28
cmd/cert-bundler/Dockerfile
Normal file
@@ -0,0 +1,28 @@
|
||||
# Build and runtime image for cert-bundler
|
||||
# Usage (from repo root or cmd/cert-bundler directory):
|
||||
# docker build -t cert-bundler:latest -f cmd/cert-bundler/Dockerfile .
|
||||
# docker run --rm -v "$PWD":/work cert-bundler:latest
|
||||
# This expects a /work/bundle.yaml file in the mounted directory and
|
||||
# will write generated bundles to /work/bundle.
|
||||
|
||||
# Build stage
|
||||
FROM golang:1.24.3-alpine AS build
|
||||
WORKDIR /src
|
||||
|
||||
# Copy go module files and download dependencies first for better caching
|
||||
RUN go install git.wntrmute.dev/kyle/goutils/cmd/cert-bundler@v1.13.2 && \
|
||||
mv /go/bin/cert-bundler /usr/local/bin/cert-bundler
|
||||
|
||||
# Runtime stage (kept as golang:alpine per requirement)
|
||||
FROM golang:1.24.3-alpine
|
||||
|
||||
# Create a work directory that users will typically mount into
|
||||
WORKDIR /work
|
||||
VOLUME ["/work"]
|
||||
|
||||
# Copy the built binary from the builder stage
|
||||
COPY --from=build /usr/local/bin/cert-bundler /usr/local/bin/cert-bundler
|
||||
|
||||
# Default command: read bundle.yaml from current directory and output to ./bundle
|
||||
ENTRYPOINT ["/usr/local/bin/cert-bundler"]
|
||||
CMD ["-c", "/work/bundle.yaml", "-o", "/work/bundle"]
|
||||
@@ -1,64 +1,19 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
_ "embed"
|
||||
"encoding/pem"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"gopkg.in/yaml.v2"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/bundler"
|
||||
)
|
||||
|
||||
// Config represents the top-level YAML configuration
|
||||
type Config struct {
|
||||
Config struct {
|
||||
Hashes string `yaml:"hashes"`
|
||||
Expiry string `yaml:"expiry"`
|
||||
} `yaml:"config"`
|
||||
Chains map[string]ChainGroup `yaml:"chains"`
|
||||
}
|
||||
|
||||
// ChainGroup represents a named group of certificate chains
|
||||
type ChainGroup struct {
|
||||
Certs []CertChain `yaml:"certs"`
|
||||
Outputs Outputs `yaml:"outputs"`
|
||||
}
|
||||
|
||||
// CertChain represents a root certificate and its intermediates
|
||||
type CertChain struct {
|
||||
Root string `yaml:"root"`
|
||||
Intermediates []string `yaml:"intermediates"`
|
||||
}
|
||||
|
||||
// Outputs defines output format options
|
||||
type Outputs struct {
|
||||
IncludeSingle bool `yaml:"include_single"`
|
||||
IncludeIndividual bool `yaml:"include_individual"`
|
||||
Manifest bool `yaml:"manifest"`
|
||||
Formats []string `yaml:"formats"`
|
||||
Encoding string `yaml:"encoding"`
|
||||
}
|
||||
|
||||
var (
|
||||
configFile string
|
||||
outputDir string
|
||||
)
|
||||
|
||||
var formatExtensions = map[string]string{
|
||||
"zip": ".zip",
|
||||
"tgz": ".tar.gz",
|
||||
}
|
||||
|
||||
//go:embed README.txt
|
||||
var readmeContent string
|
||||
|
||||
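As the refactored main below shows, bundling now delegates to certlib/bundler; a direct caller could do the same (sketch, with placeholder paths):

package main

import (
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/bundler"
)

func main() {
	// Reads bundle.yaml and writes the generated archives to ./bundle.
	if err := bundler.Run("bundle.yaml", "bundle"); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}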
@@ -77,452 +32,10 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Load and parse configuration
|
||||
cfg, err := loadConfig(configFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error loading config: %v\n", err)
|
||||
if err := bundler.Run(configFile, outputDir); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Parse expiry duration (default 1 year)
|
||||
expiryDuration := 365 * 24 * time.Hour
|
||||
if cfg.Config.Expiry != "" {
|
||||
expiryDuration, err = parseDuration(cfg.Config.Expiry)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error parsing expiry: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Create output directory if it doesn't exist
|
||||
if err := os.MkdirAll(outputDir, 0755); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Process each chain group
|
||||
// Pre-allocate createdFiles based on total number of formats across all groups
|
||||
totalFormats := 0
|
||||
for _, group := range cfg.Chains {
|
||||
totalFormats += len(group.Outputs.Formats)
|
||||
}
|
||||
createdFiles := make([]string, 0, totalFormats)
|
||||
for groupName, group := range cfg.Chains {
|
||||
files, err := processChainGroup(groupName, group, expiryDuration)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
createdFiles = append(createdFiles, files...)
|
||||
}
|
||||
|
||||
// Generate hash file for all created archives
|
||||
if cfg.Config.Hashes != "" {
|
||||
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
|
||||
if err := generateHashFile(hashFile, createdFiles); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("Certificate bundling completed successfully")
|
||||
}
|
||||
|
||||
func loadConfig(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func parseDuration(s string) (time.Duration, error) {
|
||||
// Support simple formats like "1y", "6m", "30d"
|
||||
if len(s) < 2 {
|
||||
return 0, fmt.Errorf("invalid duration format: %s", s)
|
||||
}
|
||||
|
||||
unit := s[len(s)-1]
|
||||
value := s[:len(s)-1]
|
||||
|
||||
var multiplier time.Duration
|
||||
switch unit {
|
||||
case 'y', 'Y':
|
||||
multiplier = 365 * 24 * time.Hour
|
||||
case 'm', 'M':
|
||||
multiplier = 30 * 24 * time.Hour
|
||||
case 'd', 'D':
|
||||
multiplier = 24 * time.Hour
|
||||
default:
|
||||
return time.ParseDuration(s)
|
||||
}
|
||||
|
||||
var num int
|
||||
_, err := fmt.Sscanf(value, "%d", &num)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid duration value: %s", s)
|
||||
}
|
||||
|
||||
return time.Duration(num) * multiplier, nil
|
||||
}
|
||||
|
||||
func processChainGroup(groupName string, group ChainGroup, expiryDuration time.Duration) ([]string, error) {
|
||||
// Default encoding to "pem" if not specified
|
||||
encoding := group.Outputs.Encoding
|
||||
if encoding == "" {
|
||||
encoding = "pem"
|
||||
}
|
||||
|
||||
// Collect certificates from all chains in the group
|
||||
singleFileCerts, individualCerts, err := loadAndCollectCerts(group.Certs, group.Outputs, expiryDuration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare files for inclusion in archives
|
||||
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, group.Outputs, encoding)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create archives for the entire group
|
||||
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
// loadAndCollectCerts loads all certificates from chains and collects them for processing
|
||||
func loadAndCollectCerts(chains []CertChain, outputs Outputs, expiryDuration time.Duration) ([]*x509.Certificate, []certWithPath, error) {
|
||||
var singleFileCerts []*x509.Certificate
|
||||
var individualCerts []certWithPath
|
||||
|
||||
for _, chain := range chains {
|
||||
// Load root certificate
|
||||
rootCert, err := certlib.LoadCertificate(chain.Root)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load root certificate %s: %v", chain.Root, err)
|
||||
}
|
||||
|
||||
// Check expiry for root
|
||||
checkExpiry(chain.Root, rootCert, expiryDuration)
|
||||
|
||||
// Add root to collections if needed
|
||||
if outputs.IncludeSingle {
|
||||
singleFileCerts = append(singleFileCerts, rootCert)
|
||||
}
|
||||
if outputs.IncludeIndividual {
|
||||
individualCerts = append(individualCerts, certWithPath{
|
||||
cert: rootCert,
|
||||
path: chain.Root,
|
||||
})
|
||||
}
|
||||
|
||||
// Load and validate intermediates
|
||||
for _, intPath := range chain.Intermediates {
|
||||
intCert, err := certlib.LoadCertificate(intPath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %v", intPath, err)
|
||||
}
|
||||
|
||||
// Validate that intermediate is signed by root
|
||||
if err := intCert.CheckSignatureFrom(rootCert); err != nil {
|
||||
return nil, nil, fmt.Errorf("intermediate %s is not properly signed by root %s: %v", intPath, chain.Root, err)
|
||||
}
|
||||
|
||||
// Check expiry for intermediate
|
||||
checkExpiry(intPath, intCert, expiryDuration)
|
||||
|
||||
// Add intermediate to collections if needed
|
||||
if outputs.IncludeSingle {
|
||||
singleFileCerts = append(singleFileCerts, intCert)
|
||||
}
|
||||
if outputs.IncludeIndividual {
|
||||
individualCerts = append(individualCerts, certWithPath{
|
||||
cert: intCert,
|
||||
path: intPath,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return singleFileCerts, individualCerts, nil
|
||||
}
|
||||
|
||||
// prepareArchiveFiles prepares all files to be included in archives
|
||||
func prepareArchiveFiles(singleFileCerts []*x509.Certificate, individualCerts []certWithPath, outputs Outputs, encoding string) ([]fileEntry, error) {
|
||||
var archiveFiles []fileEntry
|
||||
|
||||
// Handle a single bundle file
|
||||
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
|
||||
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode single bundle: %v", err)
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
|
||||
// Handle individual files
|
||||
if outputs.IncludeIndividual {
|
||||
for _, cp := range individualCerts {
|
||||
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
|
||||
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode individual cert %s: %v", cp.path, err)
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate manifest if requested
|
||||
if outputs.Manifest {
|
||||
manifestContent := generateManifest(archiveFiles)
|
||||
archiveFiles = append(archiveFiles, fileEntry{
|
||||
name: "MANIFEST",
|
||||
content: manifestContent,
|
||||
})
|
||||
}
|
||||
|
||||
return archiveFiles, nil
|
||||
}
|
||||
|
||||
// createArchiveFiles creates archive files in the specified formats
|
||||
func createArchiveFiles(groupName string, formats []string, archiveFiles []fileEntry) ([]string, error) {
|
||||
createdFiles := make([]string, 0, len(formats))
|
||||
|
||||
for _, format := range formats {
|
||||
ext, ok := formatExtensions[format]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
archivePath := filepath.Join(outputDir, groupName+ext)
|
||||
switch format {
|
||||
case "zip":
|
||||
if err := createZipArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create zip archive: %v", err)
|
||||
}
|
||||
case "tgz":
|
||||
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create tar.gz archive: %v", err)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
createdFiles = append(createdFiles, archivePath)
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
|
||||
now := time.Now()
|
||||
expiryThreshold := now.Add(expiryDuration)
|
||||
|
||||
if cert.NotAfter.Before(expiryThreshold) {
|
||||
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
|
||||
if daysUntilExpiry < 0 {
|
||||
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s has EXPIRED (expired %d days ago)\n", path, -daysUntilExpiry)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fileEntry struct {
|
||||
name string
|
||||
content []byte
|
||||
}
|
||||
|
||||
type certWithPath struct {
|
||||
cert *x509.Certificate
|
||||
path string
|
||||
}
|
||||
|
||||
// encodeCertsToFiles converts certificates to file entries based on encoding type
|
||||
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file
|
||||
func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding string, isSingle bool) ([]fileEntry, error) {
|
||||
var files []fileEntry
|
||||
|
||||
switch encoding {
|
||||
case "pem":
|
||||
pemContent := encodeCertsToPEM(certs)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
case "der":
|
||||
if isSingle {
|
||||
// For single file in DER, concatenate all cert DER bytes
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else {
|
||||
// Individual DER file (should only have one cert)
|
||||
if len(certs) > 0 {
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
}
|
||||
case "both":
|
||||
// Add PEM version
|
||||
pemContent := encodeCertsToPEM(certs)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
// Add DER version
|
||||
if isSingle {
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else {
|
||||
if len(certs) > 0 {
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// encodeCertsToPEM encodes certificates to PEM format
|
||||
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
|
||||
var pemContent []byte
|
||||
for _, cert := range certs {
|
||||
pemBlock := &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
}
|
||||
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
|
||||
}
|
||||
return pemContent
|
||||
}
|
||||
|
||||
func generateManifest(files []fileEntry) []byte {
|
||||
var manifest strings.Builder
|
||||
for _, file := range files {
|
||||
if file.name == "MANIFEST" {
|
||||
continue
|
||||
}
|
||||
hash := sha256.Sum256(file.content)
|
||||
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
|
||||
}
|
||||
return []byte(manifest.String())
|
||||
}
|
||||
|
||||
func createZipArchive(path string, files []fileEntry) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w := zip.NewWriter(f)
|
||||
|
||||
for _, file := range files {
|
||||
fw, err := w.Create(file.name)
|
||||
if err != nil {
|
||||
w.Close()
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
if _, err := fw.Write(file.content); err != nil {
|
||||
w.Close()
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Check errors on close operations
|
||||
if err := w.Close(); err != nil {
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func createTarGzArchive(path string, files []fileEntry) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gw := gzip.NewWriter(f)
|
||||
tw := tar.NewWriter(gw)
|
||||
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.name,
|
||||
Mode: 0644,
|
||||
Size: int64(len(file.content)),
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
tw.Close()
|
||||
gw.Close()
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
if _, err := tw.Write(file.content); err != nil {
|
||||
tw.Close()
|
||||
gw.Close()
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Check errors on close operations in the correct order
|
||||
if err := tw.Close(); err != nil {
|
||||
gw.Close()
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func generateHashFile(path string, files []string) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for _, file := range files {
|
||||
data, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash := sha256.Sum256(data)
|
||||
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,197 +0,0 @@
|
||||
This project is an exploration into the utility of JetBrains' Junie
to write smaller but tedious programs.

Task: build a certificate bundling tool in cmd/cert-bundler. It
creates archives of certificate chains.
A YAML file for this looks something like:

``` yaml
config:
  hashes: bundle.sha256
  expiry: 1y
chains:
  core_certs:
    certs:
      - root: roots/core-ca.pem
        intermediates:
          - int/cca1.pem
          - int/cca2.pem
          - int/cca3.pem
      - root: roots/ssh-ca.pem
        intermediates:
          - ssh/ssh_dmz1.pem
          - ssh/ssh_internal.pem
    outputs:
      include_single: true
      include_individual: true
      manifest: true
      formats:
        - zip
        - tgz
```
Some requirements:

1. First, all the certificates should be loaded.
2. For each root, each of the individual intermediates should be
   checked to make sure they are properly signed by the root CA
   (see the sketch after this list).
3. The program should optionally take an expiration period (defaulting
   to one year), specified in config.expiry, and if any certificate
   is within that expiration period, a warning should be printed.
4. If outputs.include_single is true, all certificates under chains
   should be concatenated into a single file.
5. If outputs.include_individual is true, all certificates under
   chains should be included at the root level (e.g. int/cca2.pem
   would be cca2.pem in the archive).
6. If outputs.manifest is true, a "MANIFEST" file is created with
   SHA256 sums of each file included in the archive.
7. For each of the formats, create an archive file in the output
   directory (specified with `-o`) with that format.
   - If zip is included, create a .zip file.
   - If tgz is included, create a .tar.gz file with default compression
     levels.
   - All archive files should include any generated files (single
     and/or individual) in the top-level directory.
8. In the output directory, create a file with the same name as
   config.hashes that contains the SHA256 sum of all files created.
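A minimal sketch of the signature check from requirement 2, assuming
certlib.LoadCertificate(path) returns (*x509.Certificate, error) as the
cert-bundler code in this diff uses it; the function name is illustrative:

``` go
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

// validateChain checks that every intermediate in a chain is signed by
// its root, mirroring the check performed by cert-bundler.
func validateChain(rootPath string, intermediatePaths []string) error {
	root, err := certlib.LoadCertificate(rootPath)
	if err != nil {
		return fmt.Errorf("loading root %s: %w", rootPath, err)
	}
	for _, p := range intermediatePaths {
		intermediate, err := certlib.LoadCertificate(p)
		if err != nil {
			return fmt.Errorf("loading intermediate %s: %w", p, err)
		}
		// CheckSignatureFrom verifies the intermediate's signature
		// against the root's public key.
		if err := intermediate.CheckSignatureFrom(root); err != nil {
			return fmt.Errorf("intermediate %s is not signed by %s: %w", p, rootPath, err)
		}
	}
	return nil
}
```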
-----

The outputs.include_single and outputs.include_individual options
describe what should go in the final archive. If both are specified,
the output archive should include both a single bundle.pem and each
individual certificate, for example. They map onto the Outputs type
shown below.
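For reference, the outputs block corresponds to the Outputs type defined
in cmd/cert-bundler/main.go earlier in this diff (the encoding field is
described further down):

``` go
package main

// Outputs mirrors the outputs block of a chain group in bundle.yaml.
type Outputs struct {
	IncludeSingle     bool     `yaml:"include_single"`
	IncludeIndividual bool     `yaml:"include_individual"`
	Manifest          bool     `yaml:"manifest"`
	Formats           []string `yaml:"formats"`
	Encoding          string   `yaml:"encoding"`
}
```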
-----
|
||||
|
||||
As it stands, given the following `bundle.yaml`:
|
||||
|
||||
``` yaml
|
||||
config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
core_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
- root: pems/isrg-root-x1.pem
|
||||
intermediates:
|
||||
- pems/le-e7.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: false
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
google_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: false
|
||||
manifest: true
|
||||
formats:
|
||||
- tgz
|
||||
lets_encrypt:
|
||||
certs:
|
||||
- root: pems/isrg-root-x1.pem
|
||||
intermediates:
|
||||
- pems/le-e7.pem
|
||||
outputs:
|
||||
include_single: false
|
||||
include_individual: true
|
||||
manifest: false
|
||||
formats:
|
||||
- zip
|
||||
```
|
||||
|
||||
The program outputs the following files:

- bundle.sha256
- core_certs_0.tgz (contains individual certs)
- core_certs_0.zip (contains individual certs)
- core_certs_1.tgz (contains core_certs.pem)
- core_certs_1.zip (contains core_certs.pem)
- google_certs_0.tgz
- lets_encrypt_0.zip

It should output:

- bundle.sha256
- core_certs.tgz
- core_certs.zip
- google_certs.tgz
- lets_encrypt.zip

core_certs.* should contain `bundle.pem` and all the individual
certs. There should be no _$n$ variants of archives.

-----
Add an additional field to outputs: encoding. It should accept one of
`der`, `pem`, or `both`. If `der`, certificates should be output as a
`.crt` file containing a DER-encoded certificate. If `pem`, certificates
should be output as a `.pem` file containing a PEM-encoded certificate.
If `both`, both the `.crt` and the `.pem` files should be included.
For example, given the previous config, if `encoding` is `der`, the
google_certs.tgz archive should contain:

- bundle.crt
- MANIFEST

Or with lets_encrypt.zip:

- isrg-root-x1.crt
- le-e7.crt

However, if `encoding` is `pem`, the lets_encrypt.zip archive should contain:

- isrg-root-x1.pem
- le-e7.pem

And if `encoding` is `both`, the lets_encrypt.zip archive should contain:

- isrg-root-x1.crt
- isrg-root-x1.pem
- le-e7.crt
- le-e7.pem
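The relationship between the two encodings, sketched in Go and mirroring
encodeCertsToPEM in the cert-bundler code above; writeCert and its file
naming are illustrative, not part of the tool:

``` go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"os"
)

// writeCert writes one certificate in the requested encoding. The DER
// form is simply cert.Raw; the PEM form wraps those same bytes in a
// CERTIFICATE block.
func writeCert(cert *x509.Certificate, base, encoding string) error {
	if encoding == "der" || encoding == "both" {
		if err := os.WriteFile(base+".crt", cert.Raw, 0o644); err != nil {
			return err
		}
	}
	if encoding == "pem" || encoding == "both" {
		block := &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
		if err := os.WriteFile(base+".pem", pem.EncodeToMemory(block), 0o644); err != nil {
			return err
		}
	}
	return nil
}
```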
-----

The tgz format should output a `.tar.gz` file instead of a `.tgz` file.

-----

Move the format extensions to a global variable.

-----

Write a README.txt with a description of the bundle.yaml format.

Additionally, update the help text for the program (e.g. with `-h`)
to provide the same detailed information.

-----

It may be easier to embed the README.txt in the program on build.

-----
For the archive (tar.gz and zip) writers, make sure errors are
checked at the end, and don't just defer the close operations. A
sketch of the intended pattern follows.
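A minimal sketch of that pattern for the tar.gz case, assuming the same
fileEntry shape used by cert-bundler; the point is that the tar writer,
gzip writer, and file are closed in order and every Close error is
checked rather than deferred:

``` go
package main

import (
	"archive/tar"
	"compress/gzip"
	"os"
)

type fileEntry struct {
	name    string
	content []byte
}

// writeTarGz writes files to a .tar.gz archive, checking every Close
// error in order (tar writer, then gzip writer, then the file) so a
// failed flush is not silently ignored.
func writeTarGz(path string, files []fileEntry) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	gw := gzip.NewWriter(f)
	tw := tar.NewWriter(gw)

	for _, fe := range files {
		hdr := &tar.Header{Name: fe.name, Mode: 0o644, Size: int64(len(fe.content))}
		if err := tw.WriteHeader(hdr); err != nil {
			tw.Close()
			gw.Close()
			f.Close()
			return err
		}
		if _, err := tw.Write(fe.content); err != nil {
			tw.Close()
			gw.Close()
			f.Close()
			return err
		}
	}

	if err := tw.Close(); err != nil {
		gw.Close()
		f.Close()
		return err
	}
	if err := gw.Close(); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
```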
13 cmd/cert-bundler/testdata/bundle.yaml (vendored)
@@ -2,6 +2,19 @@ config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
weird:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
- root: pems/isrg-root-x1.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
core_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
|
||||
4 cmd/cert-bundler/testdata/pkg/bundle.sha256 (vendored)
@@ -1,4 +0,0 @@
|
||||
5ed8bf9ed693045faa8a5cb0edc4a870052e56aef6291ce8b1604565affbc2a4 core_certs.zip
|
||||
e59eddc590d2f7b790a87c5b56e81697088ab54be382c0e2c51b82034006d308 core_certs.tgz
|
||||
51b9b63b1335118079e90700a3a5b847c363808e9116e576ca84f301bc433289 google_certs.tgz
|
||||
3d1910ca8835c3ded1755a8c7d6c48083c2f3ff68b2bfbf932aaf27e29d0a232 lets_encrypt.zip
|
||||
BIN cmd/cert-bundler/testdata/pkg/core_certs.tgz (vendored) Binary file not shown.
BIN cmd/cert-bundler/testdata/pkg/core_certs.zip (vendored) Binary file not shown.
BIN cmd/cert-bundler/testdata/pkg/google_certs.tgz (vendored) Binary file not shown.
BIN cmd/cert-bundler/testdata/pkg/lets_encrypt.zip (vendored) Binary file not shown.
@@ -1,14 +1,15 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"flag"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
@@ -23,6 +24,13 @@ var (
|
||||
verbose bool
|
||||
)
|
||||
|
||||
var (
|
||||
strOK = "OK"
|
||||
strExpired = "EXPIRED"
|
||||
strRevoked = "REVOKED"
|
||||
strUnknown = "UNKNOWN"
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.BoolVar(&hardfail, "hardfail", false, "treat revocation check failures as fatal")
|
||||
flag.DurationVar(&timeout, "timeout", 10*time.Second, "network timeout for OCSP/CRL fetches and TLS site connects")
|
||||
@@ -42,16 +50,16 @@ func main() {
|
||||
for _, target := range flag.Args() {
|
||||
status, err := processTarget(target)
|
||||
switch status {
|
||||
case "OK":
|
||||
fmt.Printf("%s: OK\n", target)
|
||||
case "EXPIRED":
|
||||
fmt.Printf("%s: EXPIRED: %v\n", target, err)
|
||||
case strOK:
|
||||
fmt.Printf("%s: %s\n", target, strOK)
|
||||
case strExpired:
|
||||
fmt.Printf("%s: %s: %v\n", target, strExpired, err)
|
||||
exitCode = 1
|
||||
case "REVOKED":
|
||||
fmt.Printf("%s: REVOKED\n", target)
|
||||
case strRevoked:
|
||||
fmt.Printf("%s: %s\n", target, strRevoked)
|
||||
exitCode = 1
|
||||
case "UNKNOWN":
|
||||
fmt.Printf("%s: UNKNOWN: %v\n", target, err)
|
||||
case strUnknown:
|
||||
fmt.Printf("%s: %s: %v\n", target, strUnknown, err)
|
||||
if hardfail {
|
||||
// In hardfail, treat unknown as failure
|
||||
exitCode = 1
|
||||
@@ -67,74 +75,77 @@ func processTarget(target string) (string, error) {
|
||||
return checkFile(target)
|
||||
}
|
||||
|
||||
// Not a file; treat as site
|
||||
return checkSite(target)
|
||||
}
|
||||
|
||||
func checkFile(path string) (string, error) {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return "UNKNOWN", err
|
||||
// Prefer high-level helpers from certlib to load certificates from disk
|
||||
if certs, err := certlib.LoadCertificates(path); err == nil && len(certs) > 0 {
|
||||
// Evaluate the first certificate (leaf) by default
|
||||
return evaluateCert(certs[0])
|
||||
}
|
||||
|
||||
// Try PEM first; if that fails, try single DER cert
|
||||
certs, err := certlib.ReadCertificates(in)
|
||||
if err != nil || len(certs) == 0 {
|
||||
cert, _, derr := certlib.ReadCertificate(in)
|
||||
if derr != nil || cert == nil {
|
||||
if err == nil {
|
||||
err = derr
|
||||
}
|
||||
return "UNKNOWN", err
|
||||
}
|
||||
return evaluateCert(cert)
|
||||
cert, err := certlib.LoadCertificate(path)
|
||||
if err != nil || cert == nil {
|
||||
return strUnknown, err
|
||||
}
|
||||
|
||||
// Evaluate the first certificate (leaf) by default
|
||||
return evaluateCert(certs[0])
|
||||
return evaluateCert(cert)
|
||||
}
|
||||
|
||||
func checkSite(hostport string) (string, error) {
|
||||
// Use certlib/hosts to parse host/port (supports https URLs and host:port)
|
||||
target, err := hosts.ParseHost(hostport)
|
||||
if err != nil {
|
||||
return "UNKNOWN", err
|
||||
return strUnknown, err
|
||||
}
|
||||
|
||||
d := &net.Dialer{Timeout: timeout}
|
||||
conn, err := tls.DialWithDialer(d, "tcp", target.String(), &tls.Config{InsecureSkipVerify: true, ServerName: target.Host})
|
||||
tcfg := &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
ServerName: target.Host,
|
||||
} // #nosec G402 -- CLI tool only verifies revocation
|
||||
td := &tls.Dialer{NetDialer: d, Config: tcfg}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
conn, err := td.DialContext(ctx, "tcp", target.String())
|
||||
if err != nil {
|
||||
return "UNKNOWN", err
|
||||
return strUnknown, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
state := conn.ConnectionState()
|
||||
tconn, ok := conn.(*tls.Conn)
|
||||
if !ok {
|
||||
return strUnknown, errors.New("connection is not TLS")
|
||||
}
|
||||
|
||||
state := tconn.ConnectionState()
|
||||
if len(state.PeerCertificates) == 0 {
|
||||
return "UNKNOWN", errors.New("no peer certificates presented")
|
||||
return strUnknown, errors.New("no peer certificates presented")
|
||||
}
|
||||
return evaluateCert(state.PeerCertificates[0])
|
||||
}
|
||||
|
||||
func evaluateCert(cert *x509.Certificate) (string, error) {
|
||||
// Expiry check
|
||||
now := time.Now()
|
||||
if !now.Before(cert.NotAfter) {
|
||||
return "EXPIRED", fmt.Errorf("expired at %s", cert.NotAfter)
|
||||
}
|
||||
if !now.After(cert.NotBefore) {
|
||||
return "EXPIRED", fmt.Errorf("not valid until %s", cert.NotBefore)
|
||||
}
|
||||
|
||||
// Revocation check using certlib/revoke
|
||||
// Delegate validity and revocation checks to certlib/revoke helper.
|
||||
// It returns revoked=true for both revoked and expired/not-yet-valid.
|
||||
// Map those cases back to our statuses using the returned error text.
|
||||
revoked, ok, err := revoke.VerifyCertificateError(cert)
|
||||
if revoked {
|
||||
// If revoked is true, ok will be true per implementation, err may describe why
|
||||
return "REVOKED", err
|
||||
if err != nil {
|
||||
msg := err.Error()
|
||||
if strings.Contains(msg, "expired") || strings.Contains(msg, "isn't valid until") ||
|
||||
strings.Contains(msg, "not valid until") {
|
||||
return strExpired, err
|
||||
}
|
||||
}
|
||||
return strRevoked, err
|
||||
}
|
||||
if !ok {
|
||||
// Revocation status could not be determined
|
||||
return "UNKNOWN", err
|
||||
return strUnknown, err
|
||||
}
|
||||
|
||||
return "OK", nil
|
||||
return strOK, nil
|
||||
}
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/pem"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
@@ -20,20 +23,26 @@ func main() {
|
||||
server += ":443"
|
||||
}
|
||||
|
||||
var chain string
|
||||
|
||||
conn, err := tls.Dial("tcp", server, nil)
|
||||
d := &tls.Dialer{Config: &tls.Config{}} // #nosec G402
|
||||
nc, err := d.DialContext(context.Background(), "tcp", server)
|
||||
die.If(err)
|
||||
conn, ok := nc.(*tls.Conn)
|
||||
if !ok {
|
||||
die.With("invalid TLS connection (not a *tls.Conn)")
|
||||
}
|
||||
|
||||
defer conn.Close()
|
||||
|
||||
details := conn.ConnectionState()
|
||||
var chain strings.Builder
|
||||
for _, cert := range details.PeerCertificates {
|
||||
p := pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
}
|
||||
chain += string(pem.EncodeToMemory(&p))
|
||||
chain.Write(pem.EncodeToMemory(&p))
|
||||
}
|
||||
|
||||
fmt.Println(chain)
|
||||
fmt.Fprintln(os.Stdout, chain.String())
|
||||
}
|
||||
}
|
||||
@@ -1,328 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func certPublic(cert *x509.Certificate) string {
|
||||
switch pub := cert.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
return fmt.Sprintf("RSA-%d", pub.N.BitLen())
|
||||
case *ecdsa.PublicKey:
|
||||
switch pub.Curve {
|
||||
case elliptic.P256():
|
||||
return "ECDSA-prime256v1"
|
||||
case elliptic.P384():
|
||||
return "ECDSA-secp384r1"
|
||||
case elliptic.P521():
|
||||
return "ECDSA-secp521r1"
|
||||
default:
|
||||
return "ECDSA (unknown curve)"
|
||||
}
|
||||
case *dsa.PublicKey:
|
||||
return "DSA"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func displayName(name pkix.Name) string {
|
||||
var ns []string
|
||||
|
||||
if name.CommonName != "" {
|
||||
ns = append(ns, name.CommonName)
|
||||
}
|
||||
|
||||
for i := range name.Country {
|
||||
ns = append(ns, fmt.Sprintf("C=%s", name.Country[i]))
|
||||
}
|
||||
|
||||
for i := range name.Organization {
|
||||
ns = append(ns, fmt.Sprintf("O=%s", name.Organization[i]))
|
||||
}
|
||||
|
||||
for i := range name.OrganizationalUnit {
|
||||
ns = append(ns, fmt.Sprintf("OU=%s", name.OrganizationalUnit[i]))
|
||||
}
|
||||
|
||||
for i := range name.Locality {
|
||||
ns = append(ns, fmt.Sprintf("L=%s", name.Locality[i]))
|
||||
}
|
||||
|
||||
for i := range name.Province {
|
||||
ns = append(ns, fmt.Sprintf("ST=%s", name.Province[i]))
|
||||
}
|
||||
|
||||
if len(ns) > 0 {
|
||||
return "/" + strings.Join(ns, "/")
|
||||
}
|
||||
|
||||
return "*** no subject information ***"
|
||||
}
|
||||
|
||||
func keyUsages(ku x509.KeyUsage) string {
|
||||
var uses []string
|
||||
|
||||
for u, s := range keyUsage {
|
||||
if (ku & u) != 0 {
|
||||
uses = append(uses, s)
|
||||
}
|
||||
}
|
||||
sort.Strings(uses)
|
||||
|
||||
return strings.Join(uses, ", ")
|
||||
}
|
||||
|
||||
func extUsage(ext []x509.ExtKeyUsage) string {
|
||||
ns := make([]string, 0, len(ext))
|
||||
for i := range ext {
|
||||
ns = append(ns, extKeyUsages[ext[i]])
|
||||
}
|
||||
sort.Strings(ns)
|
||||
|
||||
return strings.Join(ns, ", ")
|
||||
}
|
||||
|
||||
func showBasicConstraints(cert *x509.Certificate) {
|
||||
fmt.Printf("\tBasic constraints: ")
|
||||
if cert.BasicConstraintsValid {
|
||||
fmt.Printf("valid")
|
||||
} else {
|
||||
fmt.Printf("invalid")
|
||||
}
|
||||
|
||||
if cert.IsCA {
|
||||
fmt.Printf(", is a CA certificate")
|
||||
if !cert.BasicConstraintsValid {
|
||||
fmt.Printf(" (basic constraint failure)")
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("is not a CA certificate")
|
||||
if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
|
||||
fmt.Printf(" (key encipherment usage enabled!)")
|
||||
}
|
||||
}
|
||||
|
||||
if (cert.MaxPathLen == 0 && cert.MaxPathLenZero) || (cert.MaxPathLen > 0) {
|
||||
fmt.Printf(", max path length %d", cert.MaxPathLen)
|
||||
}
|
||||
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
|
||||
const oneTrueDateFormat = "2006-01-02T15:04:05-0700"
|
||||
|
||||
var (
|
||||
dateFormat string
|
||||
showHash bool // if true, print a SHA256 hash of the certificate's Raw field
|
||||
)
|
||||
|
||||
func wrapPrint(text string, indent int) {
|
||||
tabs := ""
|
||||
for i := 0; i < indent; i++ {
|
||||
tabs += "\t"
|
||||
}
|
||||
|
||||
fmt.Printf(tabs+"%s\n", wrap(text, indent))
|
||||
}
|
||||
|
||||
func displayCert(cert *x509.Certificate) {
|
||||
fmt.Println("CERTIFICATE")
|
||||
if showHash {
|
||||
fmt.Println(wrap(fmt.Sprintf("SHA256: %x", sha256.Sum256(cert.Raw)), 0))
|
||||
}
|
||||
fmt.Println(wrap("Subject: "+displayName(cert.Subject), 0))
|
||||
fmt.Println(wrap("Issuer: "+displayName(cert.Issuer), 0))
|
||||
fmt.Printf("\tSignature algorithm: %s / %s\n", sigAlgoPK(cert.SignatureAlgorithm),
|
||||
sigAlgoHash(cert.SignatureAlgorithm))
|
||||
fmt.Println("Details:")
|
||||
wrapPrint("Public key: "+certPublic(cert), 1)
|
||||
fmt.Printf("\tSerial number: %s\n", cert.SerialNumber)
|
||||
|
||||
if len(cert.AuthorityKeyId) > 0 {
|
||||
fmt.Printf("\t%s\n", wrap("AKI: "+dumpHex(cert.AuthorityKeyId), 1))
|
||||
}
|
||||
if len(cert.SubjectKeyId) > 0 {
|
||||
fmt.Printf("\t%s\n", wrap("SKI: "+dumpHex(cert.SubjectKeyId), 1))
|
||||
}
|
||||
|
||||
wrapPrint("Valid from: "+cert.NotBefore.Format(dateFormat), 1)
|
||||
fmt.Printf("\t until: %s\n", cert.NotAfter.Format(dateFormat))
|
||||
fmt.Printf("\tKey usages: %s\n", keyUsages(cert.KeyUsage))
|
||||
|
||||
if len(cert.ExtKeyUsage) > 0 {
|
||||
fmt.Printf("\tExtended usages: %s\n", extUsage(cert.ExtKeyUsage))
|
||||
}
|
||||
|
||||
showBasicConstraints(cert)
|
||||
|
||||
validNames := make([]string, 0, len(cert.DNSNames)+len(cert.EmailAddresses)+len(cert.IPAddresses))
|
||||
for i := range cert.DNSNames {
|
||||
validNames = append(validNames, "dns:"+cert.DNSNames[i])
|
||||
}
|
||||
|
||||
for i := range cert.EmailAddresses {
|
||||
validNames = append(validNames, "email:"+cert.EmailAddresses[i])
|
||||
}
|
||||
|
||||
for i := range cert.IPAddresses {
|
||||
validNames = append(validNames, "ip:"+cert.IPAddresses[i].String())
|
||||
}
|
||||
|
||||
sans := fmt.Sprintf("SANs (%d): %s\n", len(validNames), strings.Join(validNames, ", "))
|
||||
wrapPrint(sans, 1)
|
||||
|
||||
l := len(cert.IssuingCertificateURL)
|
||||
if l != 0 {
|
||||
var aia string
|
||||
if l == 1 {
|
||||
aia = "AIA"
|
||||
} else {
|
||||
aia = "AIAs"
|
||||
}
|
||||
wrapPrint(fmt.Sprintf("%d %s:", l, aia), 1)
|
||||
for _, url := range cert.IssuingCertificateURL {
|
||||
wrapPrint(url, 2)
|
||||
}
|
||||
}
|
||||
|
||||
l = len(cert.OCSPServer)
|
||||
if l > 0 {
|
||||
title := "OCSP server"
|
||||
if l > 1 {
|
||||
title += "s"
|
||||
}
|
||||
wrapPrint(title+":\n", 1)
|
||||
for _, ocspServer := range cert.OCSPServer {
|
||||
wrapPrint(fmt.Sprintf("- %s\n", ocspServer), 2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func displayAllCerts(in []byte, leafOnly bool) {
|
||||
certs, err := certlib.ParseCertificatesPEM(in)
|
||||
if err != nil {
|
||||
certs, _, err = certlib.ParseCertificatesDER(in, "")
|
||||
if err != nil {
|
||||
lib.Warn(err, "failed to parse certificates")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(certs) == 0 {
|
||||
lib.Warnx("no certificates found")
|
||||
return
|
||||
}
|
||||
|
||||
if leafOnly {
|
||||
displayCert(certs[0])
|
||||
return
|
||||
}
|
||||
|
||||
for i := range certs {
|
||||
displayCert(certs[i])
|
||||
}
|
||||
}
|
||||
|
||||
func displayAllCertsWeb(uri string, leafOnly bool) {
|
||||
ci := getConnInfo(uri)
|
||||
conn, err := tls.Dial("tcp", ci.Addr, permissiveConfig())
|
||||
if err != nil {
|
||||
lib.Warn(err, "couldn't connect to %s", ci.Addr)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
state := conn.ConnectionState()
|
||||
conn.Close()
|
||||
|
||||
conn, err = tls.Dial("tcp", ci.Addr, verifyConfig(ci.Host))
|
||||
if err == nil {
|
||||
err = conn.VerifyHostname(ci.Host)
|
||||
if err == nil {
|
||||
state = conn.ConnectionState()
|
||||
}
|
||||
conn.Close()
|
||||
} else {
|
||||
lib.Warn(err, "TLS verification error with server name %s", ci.Host)
|
||||
}
|
||||
|
||||
if len(state.PeerCertificates) == 0 {
|
||||
lib.Warnx("no certificates found")
|
||||
return
|
||||
}
|
||||
|
||||
if leafOnly {
|
||||
displayCert(state.PeerCertificates[0])
|
||||
return
|
||||
}
|
||||
|
||||
if len(state.VerifiedChains) == 0 {
|
||||
lib.Warnx("no verified chains found; using peer chain")
|
||||
for i := range state.PeerCertificates {
|
||||
displayCert(state.PeerCertificates[i])
|
||||
}
|
||||
} else {
|
||||
fmt.Println("TLS chain verified successfully.")
|
||||
for i := range state.VerifiedChains {
|
||||
fmt.Printf("--- Verified certificate chain %d ---\n", i+1)
|
||||
for j := range state.VerifiedChains[i] {
|
||||
displayCert(state.VerifiedChains[i][j])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
var leafOnly bool
|
||||
flag.BoolVar(&showHash, "d", false, "show hashes of raw DER contents")
|
||||
flag.StringVar(&dateFormat, "s", oneTrueDateFormat, "date `format` in Go time format")
|
||||
flag.BoolVar(&leafOnly, "l", false, "only show the leaf certificate")
|
||||
flag.Parse()
|
||||
|
||||
if flag.NArg() == 0 || (flag.NArg() == 1 && flag.Arg(0) == "-") {
|
||||
certs, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
lib.Warn(err, "couldn't read certificates from standard input")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// This is needed for getting certs from JSON/jq.
|
||||
certs = bytes.TrimSpace(certs)
|
||||
certs = bytes.Replace(certs, []byte(`\n`), []byte{0xa}, -1)
|
||||
certs = bytes.Trim(certs, `"`)
|
||||
displayAllCerts(certs, leafOnly)
|
||||
} else {
|
||||
for _, filename := range flag.Args() {
|
||||
fmt.Printf("--%s ---\n", filename)
|
||||
if strings.HasPrefix(filename, "https://") {
|
||||
displayAllCertsWeb(filename, leafOnly)
|
||||
} else {
|
||||
in, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
lib.Warn(err, "couldn't read certificate")
|
||||
continue
|
||||
}
|
||||
|
||||
displayAllCerts(in, leafOnly)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
376 cmd/certdump/main.go (Normal file)
@@ -0,0 +1,376 @@
|
||||
//lint:file-ignore SA1019 allow strict compatibility for old certs
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/kr/text"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
// following two lifted from CFSSL, (replace-regexp "\(.+\): \(.+\),"
|
||||
// "\2: \1,")
|
||||
|
||||
const (
|
||||
sSHA256 = "SHA256"
|
||||
sSHA512 = "SHA512"
|
||||
)
|
||||
|
||||
var keyUsage = map[x509.KeyUsage]string{
|
||||
x509.KeyUsageDigitalSignature: "digital signature",
|
||||
x509.KeyUsageContentCommitment: "content commitment",
|
||||
x509.KeyUsageKeyEncipherment: "key encipherment",
|
||||
x509.KeyUsageKeyAgreement: "key agreement",
|
||||
x509.KeyUsageDataEncipherment: "data encipherment",
|
||||
x509.KeyUsageCertSign: "cert sign",
|
||||
x509.KeyUsageCRLSign: "crl sign",
|
||||
x509.KeyUsageEncipherOnly: "encipher only",
|
||||
x509.KeyUsageDecipherOnly: "decipher only",
|
||||
}
|
||||
|
||||
var extKeyUsages = map[x509.ExtKeyUsage]string{
|
||||
x509.ExtKeyUsageAny: "any",
|
||||
x509.ExtKeyUsageServerAuth: "server auth",
|
||||
x509.ExtKeyUsageClientAuth: "client auth",
|
||||
x509.ExtKeyUsageCodeSigning: "code signing",
|
||||
x509.ExtKeyUsageEmailProtection: "s/mime",
|
||||
x509.ExtKeyUsageIPSECEndSystem: "ipsec end system",
|
||||
x509.ExtKeyUsageIPSECTunnel: "ipsec tunnel",
|
||||
x509.ExtKeyUsageIPSECUser: "ipsec user",
|
||||
x509.ExtKeyUsageTimeStamping: "timestamping",
|
||||
x509.ExtKeyUsageOCSPSigning: "ocsp signing",
|
||||
x509.ExtKeyUsageMicrosoftServerGatedCrypto: "microsoft sgc",
|
||||
x509.ExtKeyUsageNetscapeServerGatedCrypto: "netscape sgc",
|
||||
x509.ExtKeyUsageMicrosoftCommercialCodeSigning: "microsoft commercial code signing",
|
||||
x509.ExtKeyUsageMicrosoftKernelCodeSigning: "microsoft kernel code signing",
|
||||
}
|
||||
|
||||
func sigAlgoPK(a x509.SignatureAlgorithm) string {
|
||||
switch a {
|
||||
case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
|
||||
return "RSA"
|
||||
case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS:
|
||||
return "RSA-PSS"
|
||||
case x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
|
||||
return "ECDSA"
|
||||
case x509.DSAWithSHA1, x509.DSAWithSHA256:
|
||||
return "DSA"
|
||||
case x509.PureEd25519:
|
||||
return "Ed25519"
|
||||
case x509.UnknownSignatureAlgorithm:
|
||||
return "unknown public key algorithm"
|
||||
default:
|
||||
return "unknown public key algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
func sigAlgoHash(a x509.SignatureAlgorithm) string {
|
||||
switch a {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5"
|
||||
case x509.SHA1WithRSA, x509.ECDSAWithSHA1, x509.DSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.SHA256WithRSA, x509.ECDSAWithSHA256, x509.DSAWithSHA256:
|
||||
return sSHA256
|
||||
case x509.SHA256WithRSAPSS:
|
||||
return sSHA256
|
||||
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
|
||||
return "SHA384"
|
||||
case x509.SHA384WithRSAPSS:
|
||||
return "SHA384"
|
||||
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
|
||||
return sSHA512
|
||||
case x509.SHA512WithRSAPSS:
|
||||
return sSHA512
|
||||
case x509.PureEd25519:
|
||||
return sSHA512
|
||||
case x509.UnknownSignatureAlgorithm:
|
||||
return "unknown hash algorithm"
|
||||
default:
|
||||
return "unknown hash algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
const maxLine = 78
|
||||
|
||||
func makeIndent(n int) string {
|
||||
s := " "
|
||||
var sSb97 strings.Builder
|
||||
for range n {
|
||||
sSb97.WriteString(" ")
|
||||
}
|
||||
s += sSb97.String()
|
||||
return s
|
||||
}
|
||||
|
||||
func indentLen(n int) int {
|
||||
return 4 + (8 * n)
|
||||
}
|
||||
|
||||
// this isn't real efficient, but that's not a problem here.
|
||||
func wrap(s string, indent int) string {
|
||||
if indent > 3 {
|
||||
indent = 3
|
||||
}
|
||||
|
||||
wrapped := text.Wrap(s, maxLine)
|
||||
lines := strings.SplitN(wrapped, "\n", 2)
|
||||
if len(lines) == 1 {
|
||||
return lines[0]
|
||||
}
|
||||
|
||||
if (maxLine - indentLen(indent)) <= 0 {
|
||||
panic("too much indentation")
|
||||
}
|
||||
|
||||
rest := strings.Join(lines[1:], " ")
|
||||
wrapped = text.Wrap(rest, maxLine-indentLen(indent))
|
||||
return lines[0] + "\n" + text.Indent(wrapped, makeIndent(indent))
|
||||
}
|
||||
|
||||
func dumpHex(in []byte) string {
|
||||
return lib.HexEncode(in, lib.HexEncodeUpperColon)
|
||||
}
|
||||
|
||||
func certPublic(cert *x509.Certificate) string {
|
||||
switch pub := cert.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
return fmt.Sprintf("RSA-%d", pub.N.BitLen())
|
||||
case *ecdsa.PublicKey:
|
||||
switch pub.Curve {
|
||||
case elliptic.P256():
|
||||
return "ECDSA-prime256v1"
|
||||
case elliptic.P384():
|
||||
return "ECDSA-secp384r1"
|
||||
case elliptic.P521():
|
||||
return "ECDSA-secp521r1"
|
||||
default:
|
||||
return "ECDSA (unknown curve)"
|
||||
}
|
||||
case *dsa.PublicKey:
|
||||
return "DSA"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func displayName(name pkix.Name) string {
|
||||
var ns []string
|
||||
|
||||
if name.CommonName != "" {
|
||||
ns = append(ns, name.CommonName)
|
||||
}
|
||||
|
||||
for i := range name.Country {
|
||||
ns = append(ns, fmt.Sprintf("C=%s", name.Country[i]))
|
||||
}
|
||||
|
||||
for i := range name.Organization {
|
||||
ns = append(ns, fmt.Sprintf("O=%s", name.Organization[i]))
|
||||
}
|
||||
|
||||
for i := range name.OrganizationalUnit {
|
||||
ns = append(ns, fmt.Sprintf("OU=%s", name.OrganizationalUnit[i]))
|
||||
}
|
||||
|
||||
for i := range name.Locality {
|
||||
ns = append(ns, fmt.Sprintf("L=%s", name.Locality[i]))
|
||||
}
|
||||
|
||||
for i := range name.Province {
|
||||
ns = append(ns, fmt.Sprintf("ST=%s", name.Province[i]))
|
||||
}
|
||||
|
||||
if len(ns) > 0 {
|
||||
return "/" + strings.Join(ns, "/")
|
||||
}
|
||||
|
||||
return "*** no subject information ***"
|
||||
}
|
||||
|
||||
func keyUsages(ku x509.KeyUsage) string {
|
||||
var uses []string
|
||||
|
||||
for u, s := range keyUsage {
|
||||
if (ku & u) != 0 {
|
||||
uses = append(uses, s)
|
||||
}
|
||||
}
|
||||
sort.Strings(uses)
|
||||
|
||||
return strings.Join(uses, ", ")
|
||||
}
|
||||
|
||||
func extUsage(ext []x509.ExtKeyUsage) string {
|
||||
ns := make([]string, 0, len(ext))
|
||||
for i := range ext {
|
||||
ns = append(ns, extKeyUsages[ext[i]])
|
||||
}
|
||||
sort.Strings(ns)
|
||||
|
||||
return strings.Join(ns, ", ")
|
||||
}
|
||||
|
||||
func showBasicConstraints(cert *x509.Certificate) {
|
||||
fmt.Fprint(os.Stdout, "\tBasic constraints: ")
|
||||
if cert.BasicConstraintsValid {
|
||||
fmt.Fprint(os.Stdout, "valid")
|
||||
} else {
|
||||
fmt.Fprint(os.Stdout, "invalid")
|
||||
}
|
||||
|
||||
if cert.IsCA {
|
||||
fmt.Fprint(os.Stdout, ", is a CA certificate")
|
||||
if !cert.BasicConstraintsValid {
|
||||
fmt.Fprint(os.Stdout, " (basic constraint failure)")
|
||||
}
|
||||
} else {
|
||||
fmt.Fprint(os.Stdout, ", is not a CA certificate")
|
||||
if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
|
||||
fmt.Fprint(os.Stdout, " (key encipherment usage enabled!)")
|
||||
}
|
||||
}
|
||||
|
||||
if (cert.MaxPathLen == 0 && cert.MaxPathLenZero) || (cert.MaxPathLen > 0) {
|
||||
fmt.Fprintf(os.Stdout, ", max path length %d", cert.MaxPathLen)
|
||||
}
|
||||
|
||||
fmt.Fprintln(os.Stdout)
|
||||
}
|
||||
|
||||
const oneTrueDateFormat = "2006-01-02T15:04:05-0700"
|
||||
|
||||
var (
|
||||
dateFormat string
|
||||
showHash bool // if true, print a SHA256 hash of the certificate's Raw field
|
||||
)
|
||||
|
||||
func wrapPrint(text string, indent int) {
|
||||
tabs := ""
|
||||
var tabsSb140 strings.Builder
|
||||
for range indent {
|
||||
tabsSb140.WriteString("\t")
|
||||
}
|
||||
tabs += tabsSb140.String()
|
||||
|
||||
fmt.Fprintf(os.Stdout, tabs+"%s\n", wrap(text, indent))
|
||||
}
|
||||
|
||||
func displayCert(cert *x509.Certificate) {
|
||||
fmt.Fprintln(os.Stdout, "CERTIFICATE")
|
||||
if showHash {
|
||||
fmt.Fprintln(os.Stdout, wrap(fmt.Sprintf("SHA256: %x", sha256.Sum256(cert.Raw)), 0))
|
||||
}
|
||||
fmt.Fprintln(os.Stdout, wrap("Subject: "+displayName(cert.Subject), 0))
|
||||
fmt.Fprintln(os.Stdout, wrap("Issuer: "+displayName(cert.Issuer), 0))
|
||||
fmt.Fprintf(os.Stdout, "\tSignature algorithm: %s / %s\n", sigAlgoPK(cert.SignatureAlgorithm),
|
||||
sigAlgoHash(cert.SignatureAlgorithm))
|
||||
fmt.Fprintln(os.Stdout, "Details:")
|
||||
wrapPrint("Public key: "+certPublic(cert), 1)
|
||||
fmt.Fprintf(os.Stdout, "\tSerial number: %s\n", cert.SerialNumber)
|
||||
|
||||
if len(cert.AuthorityKeyId) > 0 {
|
||||
fmt.Fprintf(os.Stdout, "\t%s\n", wrap("AKI: "+dumpHex(cert.AuthorityKeyId), 1))
|
||||
}
|
||||
if len(cert.SubjectKeyId) > 0 {
|
||||
fmt.Fprintf(os.Stdout, "\t%s\n", wrap("SKI: "+dumpHex(cert.SubjectKeyId), 1))
|
||||
}
|
||||
|
||||
wrapPrint("Valid from: "+cert.NotBefore.Format(dateFormat), 1)
|
||||
fmt.Fprintf(os.Stdout, "\t until: %s\n", cert.NotAfter.Format(dateFormat))
|
||||
fmt.Fprintf(os.Stdout, "\tKey usages: %s\n", keyUsages(cert.KeyUsage))
|
||||
|
||||
if len(cert.ExtKeyUsage) > 0 {
|
||||
fmt.Fprintf(os.Stdout, "\tExtended usages: %s\n", extUsage(cert.ExtKeyUsage))
|
||||
}
|
||||
|
||||
showBasicConstraints(cert)
|
||||
|
||||
validNames := make([]string, 0, len(cert.DNSNames)+len(cert.EmailAddresses)+len(cert.IPAddresses))
|
||||
for i := range cert.DNSNames {
|
||||
validNames = append(validNames, "dns:"+cert.DNSNames[i])
|
||||
}
|
||||
|
||||
for i := range cert.EmailAddresses {
|
||||
validNames = append(validNames, "email:"+cert.EmailAddresses[i])
|
||||
}
|
||||
|
||||
for i := range cert.IPAddresses {
|
||||
validNames = append(validNames, "ip:"+cert.IPAddresses[i].String())
|
||||
}
|
||||
|
||||
sans := fmt.Sprintf("SANs (%d): %s\n", len(validNames), strings.Join(validNames, ", "))
|
||||
wrapPrint(sans, 1)
|
||||
|
||||
l := len(cert.IssuingCertificateURL)
|
||||
if l != 0 {
|
||||
var aia string
|
||||
if l == 1 {
|
||||
aia = "AIA"
|
||||
} else {
|
||||
aia = "AIAs"
|
||||
}
|
||||
wrapPrint(fmt.Sprintf("%d %s:", l, aia), 1)
|
||||
for _, url := range cert.IssuingCertificateURL {
|
||||
wrapPrint(url, 2)
|
||||
}
|
||||
}
|
||||
|
||||
l = len(cert.OCSPServer)
|
||||
if l > 0 {
|
||||
title := "OCSP server"
|
||||
if l > 1 {
|
||||
title += "s"
|
||||
}
|
||||
wrapPrint(title+":\n", 1)
|
||||
for _, ocspServer := range cert.OCSPServer {
|
||||
wrapPrint(fmt.Sprintf("- %s\n", ocspServer), 2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
var leafOnly bool
|
||||
flag.BoolVar(&showHash, "d", false, "show hashes of raw DER contents")
|
||||
flag.StringVar(&dateFormat, "s", oneTrueDateFormat, "date `format` in Go time format")
|
||||
flag.BoolVar(&leafOnly, "l", false, "only show the leaf certificate")
|
||||
flag.Parse()
|
||||
|
||||
opts := &certlib.FetcherOpts{
|
||||
SkipVerify: true,
|
||||
Roots: nil,
|
||||
}
|
||||
|
||||
for _, filename := range flag.Args() {
|
||||
fmt.Fprintf(os.Stdout, "--%s ---%s", filename, "\n")
|
||||
certs, err := certlib.GetCertificateChain(filename, opts)
|
||||
if err != nil {
|
||||
_, _ = lib.Warn(err, "couldn't read certificate")
|
||||
continue
|
||||
}
|
||||
|
||||
if leafOnly {
|
||||
displayCert(certs[0])
|
||||
continue
|
||||
}
|
||||
|
||||
for i := range certs {
|
||||
displayCert(certs[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,176 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/kr/text"
|
||||
)
|
||||
|
||||
// following two lifted from CFSSL, (replace-regexp "\(.+\): \(.+\),"
|
||||
// "\2: \1,")
|
||||
|
||||
var keyUsage = map[x509.KeyUsage]string{
|
||||
x509.KeyUsageDigitalSignature: "digital signature",
|
||||
x509.KeyUsageContentCommitment: "content committment",
|
||||
x509.KeyUsageKeyEncipherment: "key encipherment",
|
||||
x509.KeyUsageKeyAgreement: "key agreement",
|
||||
x509.KeyUsageDataEncipherment: "data encipherment",
|
||||
x509.KeyUsageCertSign: "cert sign",
|
||||
x509.KeyUsageCRLSign: "crl sign",
|
||||
x509.KeyUsageEncipherOnly: "encipher only",
|
||||
x509.KeyUsageDecipherOnly: "decipher only",
|
||||
}
|
||||
|
||||
var extKeyUsages = map[x509.ExtKeyUsage]string{
|
||||
x509.ExtKeyUsageAny: "any",
|
||||
x509.ExtKeyUsageServerAuth: "server auth",
|
||||
x509.ExtKeyUsageClientAuth: "client auth",
|
||||
x509.ExtKeyUsageCodeSigning: "code signing",
|
||||
x509.ExtKeyUsageEmailProtection: "s/mime",
|
||||
x509.ExtKeyUsageIPSECEndSystem: "ipsec end system",
|
||||
x509.ExtKeyUsageIPSECTunnel: "ipsec tunnel",
|
||||
x509.ExtKeyUsageIPSECUser: "ipsec user",
|
||||
x509.ExtKeyUsageTimeStamping: "timestamping",
|
||||
x509.ExtKeyUsageOCSPSigning: "ocsp signing",
|
||||
x509.ExtKeyUsageMicrosoftServerGatedCrypto: "microsoft sgc",
|
||||
x509.ExtKeyUsageNetscapeServerGatedCrypto: "netscape sgc",
|
||||
}
|
||||
|
||||
func pubKeyAlgo(a x509.PublicKeyAlgorithm) string {
|
||||
switch a {
|
||||
case x509.RSA:
|
||||
return "RSA"
|
||||
case x509.ECDSA:
|
||||
return "ECDSA"
|
||||
case x509.DSA:
|
||||
return "DSA"
|
||||
default:
|
||||
return "unknown public key algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
func sigAlgoPK(a x509.SignatureAlgorithm) string {
|
||||
switch a {
|
||||
|
||||
case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
|
||||
return "RSA"
|
||||
case x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
|
||||
return "ECDSA"
|
||||
case x509.DSAWithSHA1, x509.DSAWithSHA256:
|
||||
return "DSA"
|
||||
default:
|
||||
return "unknown public key algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
func sigAlgoHash(a x509.SignatureAlgorithm) string {
|
||||
switch a {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5"
|
||||
case x509.SHA1WithRSA, x509.ECDSAWithSHA1, x509.DSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.SHA256WithRSA, x509.ECDSAWithSHA256, x509.DSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
|
||||
return "SHA384"
|
||||
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
|
||||
return "SHA512"
|
||||
default:
|
||||
return "unknown hash algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
const maxLine = 78
|
||||
|
||||
func makeIndent(n int) string {
|
||||
s := " "
|
||||
for i := 0; i < n; i++ {
|
||||
s += " "
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func indentLen(n int) int {
|
||||
return 4 + (8 * n)
|
||||
}
|
||||
|
||||
// this isn't real efficient, but that's not a problem here
|
||||
func wrap(s string, indent int) string {
|
||||
if indent > 3 {
|
||||
indent = 3
|
||||
}
|
||||
|
||||
wrapped := text.Wrap(s, maxLine)
|
||||
lines := strings.SplitN(wrapped, "\n", 2)
|
||||
if len(lines) == 1 {
|
||||
return lines[0]
|
||||
}
|
||||
|
||||
if (maxLine - indentLen(indent)) <= 0 {
|
||||
panic("too much indentation")
|
||||
}
|
||||
|
||||
rest := strings.Join(lines[1:], " ")
|
||||
wrapped = text.Wrap(rest, maxLine-indentLen(indent))
|
||||
return lines[0] + "\n" + text.Indent(wrapped, makeIndent(indent))
|
||||
}
|
||||
|
||||
func dumpHex(in []byte) string {
|
||||
var s string
|
||||
for i := range in {
|
||||
s += fmt.Sprintf("%02X:", in[i])
|
||||
}
|
||||
|
||||
return strings.Trim(s, ":")
|
||||
}
|
||||
|
||||
// permissiveConfig returns a maximally-accepting TLS configuration;
|
||||
// the purpose is to look at the cert, not verify the security properties
|
||||
// of the connection.
|
||||
func permissiveConfig() *tls.Config {
|
||||
return &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
|
||||
// verifyConfig returns a config that will verify the connection.
|
||||
func verifyConfig(hostname string) *tls.Config {
|
||||
return &tls.Config{
|
||||
ServerName: hostname,
|
||||
}
|
||||
}
|
||||
|
||||
type connInfo struct {
|
||||
// The original URI provided.
|
||||
URI string
|
||||
|
||||
// The hostname of the server.
|
||||
Host string
|
||||
|
||||
// The port to connect on.
|
||||
Port string
|
||||
|
||||
// The address to connect to.
|
||||
Addr string
|
||||
}
|
||||
|
||||
func getConnInfo(uri string) *connInfo {
|
||||
ci := &connInfo{URI: uri}
|
||||
ci.Host = uri[len("https://"):]
|
||||
|
||||
host, port, err := net.SplitHostPort(ci.Host)
|
||||
if err != nil {
|
||||
ci.Port = "443"
|
||||
} else {
|
||||
ci.Host = host
|
||||
ci.Port = port
|
||||
}
|
||||
ci.Addr = net.JoinHostPort(ci.Host, ci.Port)
|
||||
return ci
|
||||
}
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"crypto/x509/pkix"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -54,7 +53,7 @@ func displayName(name pkix.Name) string {
|
||||
}
|
||||
|
||||
func expires(cert *x509.Certificate) time.Duration {
|
||||
return cert.NotAfter.Sub(time.Now())
|
||||
return time.Until(cert.NotAfter)
|
||||
}
|
||||
|
||||
func inDanger(cert *x509.Certificate) bool {
|
||||
@@ -76,20 +75,17 @@ func checkCert(cert *x509.Certificate) {
|
||||
}
|
||||
|
||||
func main() {
|
||||
opts := &certlib.FetcherOpts{}
|
||||
|
||||
flag.BoolVar(&opts.SkipVerify, "k", false, "skip server verification")
|
||||
flag.BoolVar(&warnOnly, "q", false, "only warn about expiring certs")
|
||||
flag.DurationVar(&leeway, "t", leeway, "warn if certificates are closer than this to expiring")
|
||||
flag.Parse()
|
||||
|
||||
for _, file := range flag.Args() {
|
||||
in, err := ioutil.ReadFile(file)
|
||||
certs, err := certlib.GetCertificateChain(file, opts)
|
||||
if err != nil {
|
||||
lib.Warn(err, "failed to read file")
|
||||
continue
|
||||
}
|
||||
|
||||
certs, err := certlib.ParseCertificatesPEM(in)
|
||||
if err != nil {
|
||||
lib.Warn(err, "while parsing certificates")
|
||||
_, _ = lib.Warn(err, "while parsing certificates")
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
53 cmd/certser/main.go (Normal file)
@@ -0,0 +1,53 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
const displayInt lib.HexEncodeMode = iota
|
||||
|
||||
func parseDisplayMode(mode string) lib.HexEncodeMode {
|
||||
mode = strings.ToLower(mode)
|
||||
|
||||
if mode == "int" {
|
||||
return displayInt
|
||||
}
|
||||
|
||||
return lib.ParseHexEncodeMode(mode)
|
||||
}
|
||||
|
||||
func serialString(cert *x509.Certificate, mode lib.HexEncodeMode) string {
|
||||
if mode == displayInt {
|
||||
return cert.SerialNumber.String()
|
||||
}
|
||||
|
||||
return lib.HexEncode(cert.SerialNumber.Bytes(), mode)
|
||||
}
|
||||
|
||||
func main() {
|
||||
opts := &certlib.FetcherOpts{}
|
||||
displayAs := flag.String("d", "int", "display mode (int, hex, uhex)")
|
||||
showExpiry := flag.Bool("e", false, "show expiry date")
|
||||
flag.BoolVar(&opts.SkipVerify, "k", false, "skip server verification")
|
||||
flag.Parse()
|
||||
|
||||
displayMode := parseDisplayMode(*displayAs)
|
||||
|
||||
for _, arg := range flag.Args() {
|
||||
cert, err := certlib.GetCertificate(arg, opts)
|
||||
die.If(err)
|
||||
|
||||
fmt.Printf("%s: %s", arg, serialString(cert, displayMode))
|
||||
if *showExpiry {
|
||||
fmt.Printf(" (%s)", cert.NotAfter.Format("2006-01-02"))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
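A rough sketch of what the `-d` display modes in the new `certser` tool are meant to produce. The real formatting is delegated to `lib.HexEncode` and `lib.ParseHexEncodeMode`, whose exact modes are not shown in this diff, so the standard library is used here and the serial number is made up for illustration:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
	"strings"
)

func main() {
	// A made-up serial number, for illustration only.
	serial := new(big.Int).SetBytes([]byte{0x0a, 0x1b, 0x2c, 0x3d})

	fmt.Println("int: ", serial.String())                                     // decimal, as with -d int
	fmt.Println("hex: ", hex.EncodeToString(serial.Bytes()))                  // lowercase hex
	fmt.Println("uhex:", strings.ToUpper(hex.EncodeToString(serial.Bytes()))) // uppercase hex
}
```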
@@ -4,13 +4,11 @@ import (
"crypto/x509"
"flag"
"fmt"
"io/ioutil"
"os"
"time"

"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/certlib/revoke"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)

@@ -30,83 +28,122 @@ func printRevocation(cert *x509.Certificate) {
}
}

func main() {
var caFile, intFile string
var forceIntermediateBundle, revexp, verbose bool
flag.StringVar(&caFile, "ca", "", "CA certificate `bundle`")
flag.StringVar(&intFile, "i", "", "intermediate `bundle`")
flag.BoolVar(&forceIntermediateBundle, "f", false,
type appConfig struct {
caFile, intFile string
forceIntermediateBundle bool
revexp, skipVerify, verbose bool
}

func parseFlags() appConfig {
var cfg appConfig
flag.StringVar(&cfg.caFile, "ca", "", "CA certificate `bundle`")
flag.StringVar(&cfg.intFile, "i", "", "intermediate `bundle`")
flag.BoolVar(&cfg.forceIntermediateBundle, "f", false,
"force the use of the intermediate bundle, ignoring any intermediates bundled with certificate")
flag.BoolVar(&revexp, "r", false, "print revocation and expiry information")
flag.BoolVar(&verbose, "v", false, "verbose")
flag.BoolVar(&cfg.skipVerify, "k", false, "skip CA verification")
flag.BoolVar(&cfg.revexp, "r", false, "print revocation and expiry information")
flag.BoolVar(&cfg.verbose, "v", false, "verbose")
flag.Parse()
return cfg
}

var roots *x509.CertPool
if caFile != "" {
var err error
if verbose {
fmt.Println("[+] loading root certificates from", caFile)
}
roots, err = certlib.LoadPEMCertPool(caFile)
die.If(err)
func loadRoots(caFile string, verbose bool) (*x509.CertPool, error) {
if caFile == "" {
return x509.SystemCertPool()
}

var ints *x509.CertPool
if intFile != "" {
var err error
if verbose {
fmt.Println("[+] loading intermediate certificates from", intFile)
}
ints, err = certlib.LoadPEMCertPool(caFile)
die.If(err)
} else {
ints = x509.NewCertPool()
}

if flag.NArg() != 1 {
fmt.Fprintf(os.Stderr, "Usage: %s [-ca bundle] [-i bundle] cert",
lib.ProgName())
}

fileData, err := ioutil.ReadFile(flag.Arg(0))
die.If(err)

chain, err := certlib.ParseCertificatesPEM(fileData)
die.If(err)
if verbose {
fmt.Printf("[+] %s has %d certificates\n", flag.Arg(0), len(chain))
fmt.Println("[+] loading root certificates from", caFile)
}
return certlib.LoadPEMCertPool(caFile)
}

cert := chain[0]
if len(chain) > 1 {
if !forceIntermediateBundle {
for _, intermediate := range chain[1:] {
if verbose {
fmt.Printf("[+] adding intermediate with SKI %x\n", intermediate.SubjectKeyId)
}
func loadIntermediates(intFile string, verbose bool) (*x509.CertPool, error) {
if intFile == "" {
return x509.NewCertPool(), nil
}
if verbose {
fmt.Println("[+] loading intermediate certificates from", intFile)
}
// Note: use intFile here (previously used caFile mistakenly)
return certlib.LoadPEMCertPool(intFile)
}

ints.AddCert(intermediate)
}
func addBundledIntermediates(chain []*x509.Certificate, pool *x509.CertPool, verbose bool) {
for _, intermediate := range chain[1:] {
if verbose {
fmt.Printf("[+] adding intermediate with SKI %x\n", intermediate.SubjectKeyId)
}
pool.AddCert(intermediate)
}
}

func verifyCert(cert *x509.Certificate, roots, ints *x509.CertPool) error {
opts := x509.VerifyOptions{
Intermediates: ints,
Roots: roots,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
}
_, err := cert.Verify(opts)
return err
}

_, err = cert.Verify(opts)
func run(cfg appConfig) error {
roots, err := loadRoots(cfg.caFile, cfg.verbose)
if err != nil {
fmt.Fprintf(os.Stderr, "Verification failed: %v\n", err)
os.Exit(1)
return err
}

if verbose {
ints, err := loadIntermediates(cfg.intFile, cfg.verbose)
if err != nil {
return err
}

if flag.NArg() != 1 {
fmt.Fprintf(os.Stderr, "Usage: %s [-ca bundle] [-i bundle] cert", lib.ProgName())
}

combinedPool, err := certlib.LoadFullCertPool(cfg.caFile, cfg.intFile)
if err != nil {
return fmt.Errorf("failed to build combined pool: %w", err)
}

opts := &certlib.FetcherOpts{
Roots: combinedPool,
SkipVerify: cfg.skipVerify,
}

chain, err := certlib.GetCertificateChain(flag.Arg(0), opts)
if err != nil {
return err
}
if cfg.verbose {
fmt.Printf("[+] %s has %d certificates\n", flag.Arg(0), len(chain))
}

cert := chain[0]
if len(chain) > 1 && !cfg.forceIntermediateBundle {
addBundledIntermediates(chain, ints, cfg.verbose)
}

if err = verifyCert(cert, roots, ints); err != nil {
return fmt.Errorf("certificate verification failed: %w", err)
}

if cfg.verbose {
fmt.Println("OK")
}

if revexp {
if cfg.revexp {
printRevocation(cert)
}
return nil
}

func main() {
cfg := parseFlags()
if err := run(cfg); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
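For readers unfamiliar with the verification call that `verifyCert` wraps, here is a minimal, self-contained sketch of the same `x509.VerifyOptions` shape. It generates a throwaway self-signed certificate so it runs without touching the filesystem; the demo common name is made up:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Create a throwaway self-signed certificate to exercise the verify path.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "verify-demo"},
		NotBefore:             time.Now().Add(-time.Hour),
		NotAfter:              time.Now().Add(time.Hour),
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageCertSign,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	roots := x509.NewCertPool()
	roots.AddCert(cert)

	// The same shape as verifyCert above: roots + intermediates + ExtKeyUsageAny.
	opts := x509.VerifyOptions{
		Roots:         roots,
		Intermediates: x509.NewCertPool(),
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
	}
	if _, err := cert.Verify(opts); err != nil {
		fmt.Println("verification failed:", err)
		return
	}
	fmt.Println("OK")
}
```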
@@ -2,6 +2,8 @@ package main

import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"io"
@@ -56,7 +58,7 @@ var modes = ssh.TerminalModes{
}

func sshAgent() ssh.AuthMethod {
a, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
a, err := (&net.Dialer{}).DialContext(context.Background(), "unix", os.Getenv("SSH_AUTH_SOCK"))
if err == nil {
return ssh.PublicKeysCallback(agent.NewClient(a).Signers)
}
@@ -82,7 +84,7 @@ func scanner(host string, in io.Reader, out io.Writer) {
}
}

func logError(host string, err error, format string, args ...interface{}) {
func logError(host string, err error, format string, args ...any) {
msg := fmt.Sprintf(format, args...)
log.Printf("[%s] FAILED: %s: %v\n", host, msg, err)
}
@@ -93,7 +95,7 @@ func exec(wg *sync.WaitGroup, user, host string, commands []string) {
defer func() {
for i := len(shutdown) - 1; i >= 0; i-- {
err := shutdown[i]()
if err != nil && err != io.EOF {
if err != nil && !errors.Is(err, io.EOF) {
logError(host, err, "shutting down")
}
}
@@ -115,7 +117,7 @@ func exec(wg *sync.WaitGroup, user, host string, commands []string) {
}
shutdown = append(shutdown, session.Close)

if err := session.RequestPty("xterm", 80, 40, modes); err != nil {
if err = session.RequestPty("xterm", 80, 40, modes); err != nil {
session.Close()
logError(host, err, "request for pty failed")
return
@@ -150,7 +152,7 @@ func upload(wg *sync.WaitGroup, user, host, local, remote string) {
defer func() {
for i := len(shutdown) - 1; i >= 0; i-- {
err := shutdown[i]()
if err != nil && err != io.EOF {
if err != nil && !errors.Is(err, io.EOF) {
logError(host, err, "shutting down")
}
}
@@ -199,7 +201,7 @@ func upload(wg *sync.WaitGroup, user, host, local, remote string) {
fmt.Printf("[%s] wrote %d-byte chunk\n", host, n)
}

if err == io.EOF {
if errors.Is(err, io.EOF) {
break
} else if err != nil {
logError(host, err, "reading chunk")
@@ -215,7 +217,7 @@ func download(wg *sync.WaitGroup, user, host, local, remote string) {
defer func() {
for i := len(shutdown) - 1; i >= 0; i-- {
err := shutdown[i]()
if err != nil && err != io.EOF {
if err != nil && !errors.Is(err, io.EOF) {
logError(host, err, "shutting down")
}
}
@@ -265,7 +267,7 @@ func download(wg *sync.WaitGroup, user, host, local, remote string) {
fmt.Printf("[%s] wrote %d-byte chunk\n", host, n)
}

if err == io.EOF {
if errors.Is(err, io.EOF) {
break
} else if err != nil {
logError(host, err, "reading chunk")

@@ -10,6 +10,7 @@ import (
"io"
"os"
"path/filepath"
"strings"

"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/fileutil"
@@ -26,7 +27,7 @@ func setupFile(hdr *tar.Header, file *os.File) error {
if verbose {
fmt.Printf("\tchmod %0#o\n", hdr.Mode)
}
err := file.Chmod(os.FileMode(hdr.Mode))
err := file.Chmod(os.FileMode(hdr.Mode & 0xFFFFFFFF)) // #nosec G115
if err != nil {
return err
}
@@ -48,73 +49,105 @@ func linkTarget(target, top string) string {
return target
}

return filepath.Clean(filepath.Join(target, top))
return filepath.Clean(filepath.Join(top, target))
}

// safeJoin joins base and elem and ensures the resulting path does not escape base.
func safeJoin(base, elem string) (string, error) {
cleanBase := filepath.Clean(base)
joined := filepath.Clean(filepath.Join(cleanBase, elem))

absBase, err := filepath.Abs(cleanBase)
if err != nil {
return "", err
}
absJoined, err := filepath.Abs(joined)
if err != nil {
return "", err
}
rel, err := filepath.Rel(absBase, absJoined)
if err != nil {
return "", err
}
if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return "", fmt.Errorf("path traversal detected: %s escapes %s", elem, base)
}
return joined, nil
}

func handleTypeReg(tfr *tar.Reader, hdr *tar.Header, filePath string) error {
file, err := os.Create(filePath)
if err != nil {
return err
}
defer file.Close()

if _, err = io.Copy(file, tfr); err != nil {
return err
}
return setupFile(hdr, file)
}

func handleTypeLink(hdr *tar.Header, top, filePath string) error {
file, err := os.Create(filePath)
if err != nil {
return err
}
defer file.Close()

srcPath, err := safeJoin(top, hdr.Linkname)
if err != nil {
return err
}
source, err := os.Open(srcPath)
if err != nil {
return err
}
defer source.Close()

if _, err = io.Copy(file, source); err != nil {
return err
}
return setupFile(hdr, file)
}

func handleTypeSymlink(hdr *tar.Header, top, filePath string) error {
if !fileutil.ValidateSymlink(hdr.Linkname, top) {
return fmt.Errorf("symlink %s is outside the top-level %s", hdr.Linkname, top)
}
path := linkTarget(hdr.Linkname, top)
if ok, err := filepath.Match(top+"/*", filepath.Clean(path)); !ok {
return fmt.Errorf("symlink %s isn't in %s", hdr.Linkname, top)
} else if err != nil {
return err
}
return os.Symlink(linkTarget(hdr.Linkname, top), filePath)
}

func handleTypeDir(hdr *tar.Header, filePath string) error {
return os.MkdirAll(filePath, os.FileMode(hdr.Mode&0xFFFFFFFF)) // #nosec G115
}

func processFile(tfr *tar.Reader, hdr *tar.Header, top string) error {
if verbose {
fmt.Println(hdr.Name)
}
filePath := filepath.Clean(filepath.Join(top, hdr.Name))
switch hdr.Typeflag {
case tar.TypeReg:
file, err := os.Create(filePath)
if err != nil {
return err
}

_, err = io.Copy(file, tfr)
if err != nil {
return err
}

err = setupFile(hdr, file)
if err != nil {
return err
}
case tar.TypeLink:
file, err := os.Create(filePath)
if err != nil {
return err
}

source, err := os.Open(hdr.Linkname)
if err != nil {
return err
}

_, err = io.Copy(file, source)
if err != nil {
return err
}

err = setupFile(hdr, file)
if err != nil {
return err
}
case tar.TypeSymlink:
if !fileutil.ValidateSymlink(hdr.Linkname, top) {
return fmt.Errorf("symlink %s is outside the top-level %s",
hdr.Linkname, top)
}
path := linkTarget(hdr.Linkname, top)
if ok, err := filepath.Match(top+"/*", filepath.Clean(path)); !ok {
return fmt.Errorf("symlink %s isn't in %s", hdr.Linkname, top)
} else if err != nil {
return err
}

err := os.Symlink(linkTarget(hdr.Linkname, top), filePath)
if err != nil {
return err
}
case tar.TypeDir:
err := os.MkdirAll(filePath, os.FileMode(hdr.Mode))
if err != nil {
return err
}
filePath, err := safeJoin(top, hdr.Name)
if err != nil {
return err
}

switch hdr.Typeflag {
case tar.TypeReg:
return handleTypeReg(tfr, hdr, filePath)
case tar.TypeLink:
return handleTypeLink(hdr, top, filePath)
case tar.TypeSymlink:
return handleTypeSymlink(hdr, top, filePath)
case tar.TypeDir:
return handleTypeDir(hdr, filePath)
}
return nil
}

@@ -261,16 +294,16 @@ func main() {
die.If(err)

tfr := tar.NewReader(r)
var hdr *tar.Header
for {
hdr, err := tfr.Next()
if err == io.EOF {
hdr, err = tfr.Next()
if errors.Is(err, io.EOF) {
break
}
die.If(err)

err = processFile(tfr, hdr, top)
die.If(err)

}

r.Close()
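A small sketch of the path-containment check that `safeJoin` adds. It is simplified (it skips the `filepath.Abs` normalization the original performs), and the helper name and error wording are illustrative only; the point is how a traversal attempt is rejected:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// containedJoin mirrors the safeJoin idea above: join, then reject any result
// that resolves outside the base directory.
func containedJoin(base, elem string) (string, error) {
	joined := filepath.Clean(filepath.Join(base, elem))
	rel, err := filepath.Rel(filepath.Clean(base), joined)
	if err != nil {
		return "", err
	}
	if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
		return "", fmt.Errorf("path traversal detected: %s escapes %s", elem, base)
	}
	return joined, nil
}

func main() {
	if p, err := containedJoin("/tmp/extract", "docs/readme.txt"); err == nil {
		fmt.Println("ok:", p)
	}
	if _, err := containedJoin("/tmp/extract", "../../etc/passwd"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```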
@@ -7,9 +7,9 @@ import (
"encoding/pem"
"flag"
"fmt"
"io/ioutil"
"log"
"os"

"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/die"
)

@@ -17,17 +17,10 @@ func main() {
flag.Parse()

for _, fileName := range flag.Args() {
in, err := ioutil.ReadFile(fileName)
in, err := os.ReadFile(fileName)
die.If(err)

if p, _ := pem.Decode(in); p != nil {
if p.Type != "CERTIFICATE REQUEST" {
log.Fatal("INVALID FILE TYPE")
}
in = p.Bytes
}

csr, err := x509.ParseCertificateRequest(in)
csr, _, err := certlib.ParseCSR(in)
die.If(err)

out, err := x509.MarshalPKIXPublicKey(csr.PublicKey)
@@ -48,8 +41,8 @@ func main() {
Bytes: out,
}

err = ioutil.WriteFile(fileName+".pub", pem.EncodeToMemory(p), 0644)
err = os.WriteFile(fileName+".pub", pem.EncodeToMemory(p), 0o644) // #nosec G306
die.If(err)
fmt.Printf("[+] wrote %s.\n", fileName+".pub")
fmt.Fprintf(os.Stdout, "[+] wrote %s.\n", fileName+".pub")
}
}

@@ -1,6 +1,7 @@
package main

import (
"context"
"flag"
"fmt"
"io"
@@ -152,7 +153,7 @@ func rsync(syncDir, target, excludeFile string, verboseRsync bool) error {
return err
}

cmd := exec.Command(path, args...)
cmd := exec.CommandContext(context.Background(), path, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
@@ -163,7 +164,6 @@ func init() {
}

func main() {

var logLevel, mountDir, syncDir, target string
var dryRun, quietMode, noSyslog, verboseRsync bool

@@ -219,7 +219,7 @@ func main() {
if excludeFile != "" {
defer func() {
log.Infof("removing exclude file %s", excludeFile)
if err := os.Remove(excludeFile); err != nil {
if rmErr := os.Remove(excludeFile); rmErr != nil {
log.Warningf("failed to remove temp file %s", excludeFile)
}
}()

@@ -15,43 +15,41 @@ import (
const defaultHashAlgorithm = "sha256"

var (
hAlgo string
hAlgo string
debug = dbg.New()
)

func openImage(imageFile string) (image *os.File, hash []byte, err error) {
image, err = os.Open(imageFile)
func openImage(imageFile string) (*os.File, []byte, error) {
f, err := os.Open(imageFile)
if err != nil {
return
return nil, nil, err
}

hash, err = ahash.SumReader(hAlgo, image)
h, err := ahash.SumReader(hAlgo, f)
if err != nil {
return
return nil, nil, err
}

_, err = image.Seek(0, 0)
if err != nil {
return
if _, err = f.Seek(0, 0); err != nil {
return nil, nil, err
}

debug.Printf("%s %x\n", imageFile, hash)
return
debug.Printf("%s %x\n", imageFile, h)
return f, h, nil
}

func openDevice(devicePath string) (device *os.File, err error) {
func openDevice(devicePath string) (*os.File, error) {
fi, err := os.Stat(devicePath)
if err != nil {
return
return nil, err
}

device, err = os.OpenFile(devicePath, os.O_RDWR|os.O_SYNC, fi.Mode())
device, err := os.OpenFile(devicePath, os.O_RDWR|os.O_SYNC, fi.Mode())
if err != nil {
return
return nil, err
}

return
return device, nil
}

func main() {
@@ -105,12 +103,12 @@ func main() {
die.If(err)

if !bytes.Equal(deviceHash, hash) {
fmt.Fprintln(os.Stderr, "Hash mismatch:")
fmt.Fprintf(os.Stderr, "\t%s: %s\n", imageFile, hash)
fmt.Fprintf(os.Stderr, "\t%s: %s\n", devicePath, deviceHash)
os.Exit(1)
buf := &bytes.Buffer{}
fmt.Fprintln(buf, "Hash mismatch:")
fmt.Fprintf(buf, "\t%s: %s\n", imageFile, hash)
fmt.Fprintf(buf, "\t%s: %s\n", devicePath, deviceHash)
die.With(buf.String())
}

debug.Println("OK")
os.Exit(0)
}

@@ -1,30 +1,33 @@
package main

import (
"errors"
"flag"
"fmt"
"git.wntrmute.dev/kyle/goutils/die"
"io"
"os"
"strings"

"git.wntrmute.dev/kyle/goutils/die"
)

func usage(w io.Writer, exc int) {
fmt.Fprintln(w, `usage: dumpbytes <file>`)
fmt.Fprintln(w, `usage: dumpbytes -n tabs <file>`)
os.Exit(exc)
}

func printBytes(buf []byte) {
fmt.Printf("\t")
for i := 0; i < len(buf); i++ {
for i := range buf {
fmt.Printf("0x%02x, ", buf[i])
}
fmt.Println()
}

func dumpFile(path string, indentLevel int) error {
indent := ""
for i := 0; i < indentLevel; i++ {
indent += "\t"
var indent strings.Builder
for range indentLevel {
indent.WriteByte('\t')
}

file, err := os.Open(path)
@@ -34,13 +37,14 @@ func dumpFile(path string, indentLevel int) error {

defer file.Close()

fmt.Printf("%svar buffer = []byte{\n", indent)
fmt.Printf("%svar buffer = []byte{\n", indent.String())
var n int
for {
buf := make([]byte, 8)
n, err := file.Read(buf)
if err == io.EOF {
n, err = file.Read(buf)
if errors.Is(err, io.EOF) {
if n > 0 {
fmt.Printf("%s", indent)
fmt.Printf("%s", indent.String())
printBytes(buf[:n])
}
break
@@ -50,11 +54,11 @@ func dumpFile(path string, indentLevel int) error {
return err
}

fmt.Printf("%s", indent)
fmt.Printf("%s", indent.String())
printBytes(buf[:n])
}

fmt.Printf("%s}\n", indent)
fmt.Printf("%s}\n", indent.String())
return nil
}

@@ -7,7 +7,7 @@ import (
"git.wntrmute.dev/kyle/goutils/die"
)

// size of a kilobit in bytes
// size of a kilobit in bytes.
const kilobit = 128
const pageSize = 4096

@@ -26,10 +26,10 @@ func main() {
path = flag.Arg(0)
}

fillByte := uint8(*fill)
fillByte := uint8(*fill & 0xff) // #nosec G115 clearing out of bounds bits

buf := make([]byte, pageSize)
for i := 0; i < pageSize; i++ {
for i := range pageSize {
buf[i] = fillByte
}

@@ -40,7 +40,7 @@ func main() {
die.If(err)
defer file.Close()

for i := 0; i < pages; i++ {
for range pages {
_, err = file.Write(buf)
die.If(err)
}

@@ -72,15 +72,13 @@ func main() {

if end < start {
fmt.Fprintln(os.Stderr, "[!] end < start, swapping values")
tmp := end
end = start
start = tmp
start, end = end, start
}

var fmtStr string

if !*quiet {
maxLine := fmt.Sprintf("%d", len(lines))
maxLine := strconv.Itoa(len(lines))
fmtStr = fmt.Sprintf("%%0%dd: %%s", len(maxLine))
}

@@ -98,9 +96,9 @@ func main() {
fmtStr += "\n"
for i := start; !endFunc(i); i++ {
if *quiet {
fmt.Println(lines[i])
fmt.Fprintln(os.Stdout, lines[i])
} else {
fmt.Printf(fmtStr, i, lines[i])
fmt.Fprintf(os.Stdout, fmtStr, i, lines[i])
}
}
}

@@ -1,6 +1,7 @@
package main

import (
"context"
"flag"
"fmt"
"log"
@@ -8,7 +9,8 @@ import (
)

func lookupHost(host string) error {
cname, err := net.LookupCNAME(host)
r := &net.Resolver{}
cname, err := r.LookupCNAME(context.Background(), host)
if err != nil {
return err
}
@@ -18,7 +20,7 @@ func lookupHost(host string) error {
host = cname
}

addrs, err := net.LookupHost(host)
addrs, err := r.LookupHost(context.Background(), host)
if err != nil {
return err
}
@@ -5,7 +5,7 @@ import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"io"
"os"

"git.wntrmute.dev/kyle/goutils/lib"
@@ -16,20 +16,20 @@ func prettify(file string, validateOnly bool) error {
var err error

if file == "-" {
in, err = ioutil.ReadAll(os.Stdin)
in, err = io.ReadAll(os.Stdin)
} else {
in, err = ioutil.ReadFile(file)
in, err = os.ReadFile(file)
}

if err != nil {
lib.Warn(err, "ReadFile")
_, _ = lib.Warn(err, "ReadFile")
return err
}

var buf = &bytes.Buffer{}
err = json.Indent(buf, in, "", " ")
if err != nil {
lib.Warn(err, "%s", file)
_, _ = lib.Warn(err, "%s", file)
return err
}

@@ -40,11 +40,11 @@ func prettify(file string, validateOnly bool) error {
if file == "-" {
_, err = os.Stdout.Write(buf.Bytes())
} else {
err = ioutil.WriteFile(file, buf.Bytes(), 0644)
err = os.WriteFile(file, buf.Bytes(), 0o644)
}

if err != nil {
lib.Warn(err, "WriteFile")
_, _ = lib.Warn(err, "WriteFile")
}

return err
@@ -55,20 +55,20 @@ func compact(file string, validateOnly bool) error {
var err error

if file == "-" {
in, err = ioutil.ReadAll(os.Stdin)
in, err = io.ReadAll(os.Stdin)
} else {
in, err = ioutil.ReadFile(file)
in, err = os.ReadFile(file)
}

if err != nil {
lib.Warn(err, "ReadFile")
_, _ = lib.Warn(err, "ReadFile")
return err
}

var buf = &bytes.Buffer{}
err = json.Compact(buf, in)
if err != nil {
lib.Warn(err, "%s", file)
_, _ = lib.Warn(err, "%s", file)
return err
}

@@ -79,11 +79,11 @@ func compact(file string, validateOnly bool) error {
if file == "-" {
_, err = os.Stdout.Write(buf.Bytes())
} else {
err = ioutil.WriteFile(file, buf.Bytes(), 0644)
err = os.WriteFile(file, buf.Bytes(), 0o644)
}

if err != nil {
lib.Warn(err, "WriteFile")
_, _ = lib.Warn(err, "WriteFile")
}

return err
@@ -91,7 +91,7 @@ func compact(file string, validateOnly bool) error {

func usage() {
progname := lib.ProgName()
fmt.Printf(`Usage: %s [-h] files...
fmt.Fprintf(os.Stdout, `Usage: %s [-h] files...
%s is used to lint and prettify (or compact) JSON files. The
files will be updated in-place.

@@ -100,7 +100,6 @@ func usage() {
-h Print this help message.
-n Don't prettify; only perform validation.
`, progname, progname)

}

func init() {
@@ -11,6 +11,7 @@ based on whether the source filename ends in ".gz".

Flags:
-l level Compression level (0-9). Only meaninful when
compressing a file.
compressing a file.
-u Do not restrict the size during decompression. As
a safeguard against gzip bombs, the maximum size
allowed is 32 * the compressed file size.

@@ -9,8 +9,6 @@ import (
"os"
"path/filepath"
"strings"

"github.com/pkg/errors"
)

const gzipExt = ".gz"
@@ -18,52 +16,68 @@ const gzipExt = ".gz"
func compress(path, target string, level int) error {
sourceFile, err := os.Open(path)
if err != nil {
return errors.Wrap(err, "opening file for read")
return fmt.Errorf("opening file for read: %w", err)
}
defer sourceFile.Close()

destFile, err := os.Create(target)
if err != nil {
return errors.Wrap(err, "opening file for write")
return fmt.Errorf("opening file for write: %w", err)
}
defer destFile.Close()

gzipCompressor, err := gzip.NewWriterLevel(destFile, level)
if err != nil {
return errors.Wrap(err, "invalid compression level")
return fmt.Errorf("invalid compression level: %w", err)
}
defer gzipCompressor.Close()

_, err = io.Copy(gzipCompressor, sourceFile)
if err != nil {
return errors.Wrap(err, "compressing file")
return fmt.Errorf("compressing file: %w", err)
}

return nil
}

func uncompress(path, target string) error {
func uncompress(path, target string, unrestrict bool) error {
sourceFile, err := os.Open(path)
if err != nil {
return errors.Wrap(err, "opening file for read")
return fmt.Errorf("opening file for read: %w", err)
}
defer sourceFile.Close()

fi, err := sourceFile.Stat()
if err != nil {
return fmt.Errorf("reading file stats: %w", err)
}

maxDecompressionSize := fi.Size() * 32

gzipUncompressor, err := gzip.NewReader(sourceFile)
if err != nil {
return errors.Wrap(err, "reading gzip headers")
return fmt.Errorf("reading gzip headers: %w", err)
}
defer gzipUncompressor.Close()

var reader io.Reader = &io.LimitedReader{
R: gzipUncompressor,
N: maxDecompressionSize,
}

if unrestrict {
reader = gzipUncompressor
}

destFile, err := os.Create(target)
if err != nil {
return errors.Wrap(err, "opening file for write")
return fmt.Errorf("opening file for write: %w", err)
}
defer destFile.Close()

_, err = io.Copy(destFile, gzipUncompressor)
_, err = io.Copy(destFile, reader)
if err != nil {
return errors.Wrap(err, "uncompressing file")
return fmt.Errorf("uncompressing file: %w", err)
}

return nil
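To make the gzip-bomb guard above concrete, here is a self-contained sketch of capping decompression with an `io.LimitedReader` at 32 times the compressed size, the same ratio the tool's doc comment describes. It works entirely in memory and the payload is made up for illustration:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	// Compress a small payload in memory.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	_, _ = zw.Write(bytes.Repeat([]byte("A"), 4096))
	_ = zw.Close()

	// Cap decompression at 32x the compressed size, mirroring the guard above.
	maxSize := int64(compressed.Len()) * 32

	zr, err := gzip.NewReader(&compressed)
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	// The copy stops once the cap is reached, so a hostile stream cannot
	// expand without bound.
	limited := &io.LimitedReader{R: zr, N: maxSize}
	n, err := io.Copy(io.Discard, limited)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decompressed %d bytes (cap was %d)\n", n, maxSize)
}
```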
@@ -89,8 +103,8 @@ func isDir(path string) bool {
file, err := os.Open(path)
if err == nil {
defer file.Close()
stat, err := file.Stat()
if err != nil {
stat, err2 := file.Stat()
if err2 != nil {
return false
}

@@ -109,7 +123,7 @@ func pathForUncompressing(source, dest string) (string, error) {

source = filepath.Base(source)
if !strings.HasSuffix(source, gzipExt) {
return "", errors.Errorf("%s is a not gzip-compressed file", source)
return "", fmt.Errorf("%s is a not gzip-compressed file", source)
}
outFile := source[:len(source)-len(gzipExt)]
outFile = filepath.Join(dest, outFile)
@@ -123,7 +137,7 @@ func pathForCompressing(source, dest string) (string, error) {

source = filepath.Base(source)
if strings.HasSuffix(source, gzipExt) {
return "", errors.Errorf("%s is a gzip-compressed file", source)
return "", fmt.Errorf("%s is a gzip-compressed file", source)
}

dest = filepath.Join(dest, source+gzipExt)
@@ -134,8 +148,11 @@ func main() {
var level int
var path string
var target = "."
var err error
var unrestrict bool

flag.IntVar(&level, "l", flate.DefaultCompression, "compression level")
flag.BoolVar(&unrestrict, "u", false, "do not restrict decompression")
flag.Parse()

if flag.NArg() < 1 || flag.NArg() > 2 {
@@ -149,30 +166,31 @@ func main() {
}

if strings.HasSuffix(path, gzipExt) {
target, err := pathForUncompressing(path, target)
target, err = pathForUncompressing(path, target)
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}

err = uncompress(path, target)
err = uncompress(path, target, unrestrict)
if err != nil {
os.Remove(target)
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
} else {
target, err := pathForCompressing(path, target)
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
return
}

err = compress(path, target, level)
if err != nil {
os.Remove(target)
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
target, err = pathForCompressing(path, target)
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}

err = compress(path, target, level)
if err != nil {
os.Remove(target)
fmt.Fprintf(os.Stderr, "%s\n", err)
os.Exit(1)
}
}

@@ -40,14 +40,14 @@ func main() {
usage()
}

min, err := strconv.Atoi(flag.Arg(1))
minVal, err := strconv.Atoi(flag.Arg(1))
dieIf(err)

max, err := strconv.Atoi(flag.Arg(2))
maxVal, err := strconv.Atoi(flag.Arg(2))
dieIf(err)

code := kind << 6
code += (min << 3)
code += max
fmt.Printf("%0o\n", code)
code += (minVal << 3)
code += maxVal
fmt.Fprintf(os.Stdout, "%0o\n", code)
}

@@ -5,7 +5,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -47,7 +46,7 @@ func help(w io.Writer) {
}

func loadDatabase() {
data, err := ioutil.ReadFile(dbFile)
data, err := os.ReadFile(dbFile)
if err != nil && os.IsNotExist(err) {
partsDB = &database{
Version: dbVersion,
@@ -74,7 +73,7 @@ func writeDB() {
data, err := json.Marshal(partsDB)
die.If(err)

err = ioutil.WriteFile(dbFile, data, 0644)
err = os.WriteFile(dbFile, data, 0644)
die.If(err)
}

@@ -4,14 +4,13 @@ import (
"encoding/pem"
"flag"
"fmt"
"io/ioutil"
"os"
)

var ext = ".bin"

func stripPEM(path string) error {
data, err := ioutil.ReadFile(path)
data, err := os.ReadFile(path)
if err != nil {
return err
}
@@ -22,7 +21,7 @@ func stripPEM(path string) error {
fmt.Fprintf(os.Stderr, " (only the first object will be decoded)\n")
}

return ioutil.WriteFile(path+ext, p.Bytes, 0644)
return os.WriteFile(path+ext, p.Bytes, 0644)
}

func main() {

@@ -3,8 +3,7 @@ package main
import (
"encoding/pem"
"flag"
"fmt"
"io/ioutil"
"io"
"os"

"git.wntrmute.dev/kyle/goutils/lib"
@@ -21,9 +20,9 @@ func main() {

path := flag.Arg(0)
if path == "-" {
in, err = ioutil.ReadAll(os.Stdin)
in, err = io.ReadAll(os.Stdin)
} else {
in, err = ioutil.ReadFile(flag.Arg(0))
in, err = os.ReadFile(flag.Arg(0))
}
if err != nil {
lib.Err(lib.ExitFailure, err, "couldn't read file")
@@ -33,5 +32,7 @@ func main() {
if p == nil {
lib.Errx(lib.ExitFailure, "%s isn't a PEM-encoded file", flag.Arg(0))
}
fmt.Printf("%s", p.Bytes)
if _, err = os.Stdout.Write(p.Bytes); err != nil {
lib.Err(lib.ExitFailure, err, "writing body")
}
}
@@ -70,7 +70,7 @@ func main() {
lib.Err(lib.ExitFailure, err, "failed to read input")
}
case argc > 1:
for i := 0; i < argc; i++ {
for i := range argc {
path := flag.Arg(i)
err = copyFile(path, buf)
if err != nil {

@@ -5,7 +5,6 @@ import (
"encoding/pem"
"flag"
"fmt"
"io/ioutil"
"os"
)

@@ -13,14 +12,14 @@ func main() {
flag.Parse()

for _, fileName := range flag.Args() {
data, err := ioutil.ReadFile(fileName)
data, err := os.ReadFile(fileName)
if err != nil {
fmt.Fprintf(os.Stderr, "[!] %s: %v\n", fileName, err)
continue
}

fmt.Printf("[+] %s:\n", fileName)
rest := data[:]
fmt.Fprintf(os.Stdout, "[+] %s:\n", fileName)
rest := data
for {
var p *pem.Block
p, rest = pem.Decode(rest)
@@ -28,13 +27,14 @@ func main() {
break
}

cert, err := x509.ParseCertificate(p.Bytes)
var cert *x509.Certificate
cert, err = x509.ParseCertificate(p.Bytes)
if err != nil {
fmt.Fprintf(os.Stderr, "[!] %s: %v\n", fileName, err)
break
}

fmt.Printf("\t%+v\n", cert.Subject.CommonName)
fmt.Fprintf(os.Stdout, "\t%+v\n", cert.Subject.CommonName)
}
}
}

@@ -43,7 +43,7 @@ func newName(path string) (string, error) {
return hashName(path, encodedHash), nil
}

func move(dst, src string, force bool) (err error) {
func move(dst, src string, force bool) error {
if fileutil.FileDoesExist(dst) && !force {
return fmt.Errorf("%s exists (pass the -f flag to overwrite)", dst)
}
@@ -52,21 +52,23 @@ func move(dst, src string, force bool) (err error) {
return err
}

defer func(e error) {
var retErr error
defer func(e *error) {
dstFile.Close()
if e != nil {
if *e != nil {
os.Remove(dst)
}
}(err)
}(&retErr)

srcFile, err := os.Open(src)
if err != nil {
retErr = err
return err
}
defer srcFile.Close()

_, err = io.Copy(dstFile, srcFile)
if err != nil {
if _, err = io.Copy(dstFile, srcFile); err != nil {
retErr = err
return err
}

@@ -94,6 +96,44 @@ func init() {
flag.Usage = func() { usage(os.Stdout) }
}

type options struct {
dryRun, force, printChanged, verbose bool
}

func processOne(file string, opt options) error {
renamed, err := newName(file)
if err != nil {
_, _ = lib.Warn(err, "failed to get new file name")
return err
}
if opt.verbose && !opt.printChanged {
fmt.Fprintln(os.Stdout, file)
}
if renamed == file {
return nil
}
if !opt.dryRun {
if err = move(renamed, file, opt.force); err != nil {
_, _ = lib.Warn(err, "failed to rename file from %s to %s", file, renamed)
return err
}
}
if opt.printChanged && !opt.verbose {
fmt.Fprintln(os.Stdout, file, "->", renamed)
}
return nil
}

func run(dryRun, force, printChanged, verbose bool, files []string) {
if verbose && printChanged {
printChanged = false
}
opt := options{dryRun: dryRun, force: force, printChanged: printChanged, verbose: verbose}
for _, file := range files {
_ = processOne(file, opt)
}
}

func main() {
var dryRun, force, printChanged, verbose bool
flag.BoolVar(&force, "f", false, "force overwriting of files if there is a collision")
@@ -102,34 +142,5 @@ func main() {
flag.BoolVar(&verbose, "v", false, "list all processed files")

flag.Parse()

if verbose && printChanged {
printChanged = false
}

for _, file := range flag.Args() {
renamed, err := newName(file)
if err != nil {
lib.Warn(err, "failed to get new file name")
continue
}

if verbose && !printChanged {
fmt.Println(file)
}

if renamed != file {
if !dryRun {
err = move(renamed, file, force)
if err != nil {
lib.Warn(err, "failed to rename file from %s to %s", file, renamed)
continue
}
}

if printChanged && !verbose {
fmt.Println(file, "->", renamed)
}
}
}
run(dryRun, force, printChanged, verbose, flag.Args())
}
@@ -1,6 +1,7 @@
package main

import (
"context"
"flag"
"fmt"
"io"
@@ -66,24 +67,25 @@ func main() {
for _, remote := range flag.Args() {
u, err := url.Parse(remote)
if err != nil {
lib.Warn(err, "parsing %s", remote)
_, _ = lib.Warn(err, "parsing %s", remote)
continue
}

name := filepath.Base(u.Path)
if name == "" {
lib.Warnx("source URL doesn't appear to name a file")
_, _ = lib.Warnx("source URL doesn't appear to name a file")
continue
}

resp, err := http.Get(remote)
if err != nil {
lib.Warn(err, "fetching %s", remote)
req, reqErr := http.NewRequestWithContext(context.Background(), http.MethodGet, remote, nil)
if reqErr != nil {
_, _ = lib.Warn(reqErr, "building request for %s", remote)
continue
}

client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
lib.Warn(err, "fetching %s", remote)
_, _ = lib.Warn(err, "fetching %s", remote)
continue
}

@@ -3,7 +3,7 @@ package main
import (
"flag"
"fmt"
"math/rand"
"math/rand/v2"
"os"
"regexp"
"strconv"
@@ -17,8 +17,8 @@ func rollDie(count, sides int) []int {
sum := 0
var rolls []int

for i := 0; i < count; i++ {
roll := rand.Intn(sides) + 1
for range count {
roll := rand.IntN(sides) + 1 // #nosec G404
sum += roll
rolls = append(rolls, roll)
}

@@ -53,7 +53,7 @@ func init() {
project = wd[len(gopath):]
}

func walkFile(path string, info os.FileInfo, err error) error {
func walkFile(path string, _ os.FileInfo, err error) error {
if ignores[path] {
return filepath.SkipDir
}
@@ -62,22 +62,27 @@ func walkFile(path string, info os.FileInfo, err error) error {
return nil
}

debug.Println(path)

f, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
if err != nil {
return err
}

debug.Println(path)

f, err2 := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
if err2 != nil {
return err2
}

for _, importSpec := range f.Imports {
importPath := strings.Trim(importSpec.Path.Value, `"`)
if stdLibRegexp.MatchString(importPath) {
switch {
case stdLibRegexp.MatchString(importPath):
debug.Println("standard lib:", importPath)
continue
} else if strings.HasPrefix(importPath, project) {
case strings.HasPrefix(importPath, project):
debug.Println("internal import:", importPath)
continue
} else if strings.HasPrefix(importPath, "golang.org/") {
case strings.HasPrefix(importPath, "golang.org/"):
debug.Println("extended lib:", importPath)
continue
}
@@ -102,7 +107,7 @@ func main() {
ignores["vendor"] = true
}

for _, word := range strings.Split(ignoreLine, ",") {
for word := range strings.SplitSeq(ignoreLine, ",") {
ignores[strings.TrimSpace(word)] = true
}

105 cmd/ski/main.go
@@ -2,10 +2,9 @@ package main

import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/sha1"
"crypto/sha1" // #nosec G505
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
@@ -13,14 +12,18 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strings"

"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)

const (
keyTypeRSA = "RSA"
keyTypeECDSA = "ECDSA"
)

func usage(w io.Writer) {
fmt.Fprintf(w, `ski: print subject key info for PEM-encoded files

@@ -28,10 +31,10 @@ Usage:
ski [-hm] files...

Flags:
-d Hex encoding mode.
-h Print this help message.
-m All SKIs should match; as soon as an SKI mismatch is found,
it is reported.

`)
}

@@ -39,14 +42,14 @@ func init() {
flag.Usage = func() { usage(os.Stderr) }
}

func parse(path string) (public []byte, kt, ft string) {
data, err := ioutil.ReadFile(path)
func parse(path string) ([]byte, string, string) {
data, err := os.ReadFile(path)
die.If(err)

data = bytes.TrimSpace(data)
p, rest := pem.Decode(data)
if len(rest) > 0 {
lib.Warnx("trailing data in PEM file")
_, _ = lib.Warnx("trailing data in PEM file")
}

if p == nil {
@@ -55,6 +58,12 @@ func parse(path string) (public []byte, kt, ft string) {

data = p.Bytes

var (
public []byte
kt string
ft string
)

switch p.Type {
case "PRIVATE KEY", "RSA PRIVATE KEY", "EC PRIVATE KEY":
public, kt = parseKey(data)
@@ -69,84 +78,74 @@ func parse(path string) (public []byte, kt, ft string) {
die.With("unknown PEM type %s", p.Type)
}

return
return public, kt, ft
}

func parseKey(data []byte) (public []byte, kt string) {
privInterface, err := x509.ParsePKCS8PrivateKey(data)
func parseKey(data []byte) ([]byte, string) {
priv, err := certlib.ParsePrivateKeyDER(data)
if err != nil {
privInterface, err = x509.ParsePKCS1PrivateKey(data)
if err != nil {
privInterface, err = x509.ParseECPrivateKey(data)
if err != nil {
die.With("couldn't parse private key.")
}
}
die.If(err)
}

var priv crypto.Signer
switch privInterface.(type) {
case *rsa.PrivateKey:
priv = privInterface.(*rsa.PrivateKey)
kt = "RSA"
case *ecdsa.PrivateKey:
priv = privInterface.(*ecdsa.PrivateKey)
kt = "ECDSA"
var kt string
switch priv.Public().(type) {
case *rsa.PublicKey:
kt = keyTypeRSA
case *ecdsa.PublicKey:
kt = keyTypeECDSA
default:
die.With("unknown private key type %T", privInterface)
die.With("unknown private key type %T", priv)
}

public, err = x509.MarshalPKIXPublicKey(priv.Public())
public, err := x509.MarshalPKIXPublicKey(priv.Public())
die.If(err)

return
return public, kt
}

func parseCertificate(data []byte) (public []byte, kt string) {
func parseCertificate(data []byte) ([]byte, string) {
cert, err := x509.ParseCertificate(data)
die.If(err)

pub := cert.PublicKey
var kt string
switch pub.(type) {
case *rsa.PublicKey:
kt = "RSA"
kt = keyTypeRSA
case *ecdsa.PublicKey:
kt = "ECDSA"
kt = keyTypeECDSA
default:
die.With("unknown public key type %T", pub)
}

public, err = x509.MarshalPKIXPublicKey(pub)
public, err := x509.MarshalPKIXPublicKey(pub)
die.If(err)
return
return public, kt
}

func parseCSR(data []byte) (public []byte, kt string) {
csr, err := x509.ParseCertificateRequest(data)
func parseCSR(data []byte) ([]byte, string) {
// Use certlib to support both PEM and DER and to centralize validation.
csr, _, err := certlib.ParseCSR(data)
die.If(err)

pub := csr.PublicKey
var kt string
switch pub.(type) {
case *rsa.PublicKey:
kt = "RSA"
kt = keyTypeRSA
case *ecdsa.PublicKey:
kt = "ECDSA"
kt = keyTypeECDSA
default:
die.With("unknown public key type %T", pub)
}

public, err = x509.MarshalPKIXPublicKey(pub)
public, err := x509.MarshalPKIXPublicKey(pub)
die.If(err)
return
return public, kt
}

func dumpHex(in []byte) string {
var s string
for i := range in {
s += fmt.Sprintf("%02X:", in[i])
}

return strings.Trim(s, ":")
func dumpHex(in []byte, mode lib.HexEncodeMode) string {
return lib.HexEncode(in, mode)
}

type subjectPublicKeyInfo struct {
@@ -156,10 +155,14 @@ type subjectPublicKeyInfo struct {

func main() {
var help, shouldMatch bool
var displayModeString string
flag.StringVar(&displayModeString, "d", "lower", "hex encoding mode")
flag.BoolVar(&help, "h", false, "print a help message and exit")
flag.BoolVar(&shouldMatch, "m", false, "all SKIs should match")
flag.Parse()

displayMode := lib.ParseHexEncodeMode(displayModeString)

if help {
usage(os.Stdout)
os.Exit(0)
@@ -172,18 +175,18 @@ func main() {
var subPKI subjectPublicKeyInfo
_, err := asn1.Unmarshal(public, &subPKI)
if err != nil {
lib.Warn(err, "failed to get subject PKI")
_, _ = lib.Warn(err, "failed to get subject PKI")
continue
}

pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes)
pubHashString := dumpHex(pubHash[:])
pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes) // #nosec G401 this is the standard
pubHashString := dumpHex(pubHash[:], displayMode)
if ski == "" {
ski = pubHashString
}

if shouldMatch && ski != pubHashString {
lib.Warnx("%s: SKI mismatch (%s != %s)",
_, _ = lib.Warnx("%s: SKI mismatch (%s != %s)",
path, ski, pubHashString)
}
fmt.Printf("%s %s (%s %s)\n", path, pubHashString, kt, ft)
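For readers new to the subject key identifier that `ski` prints, here is a self-contained sketch: the RFC 5280 method-1 SKI is the SHA-1 of the `subjectPublicKey` BIT STRING inside the SubjectPublicKeyInfo. The ASN.1 struct below follows the standard SPKI layout; the fields of the file's own `subjectPublicKeyInfo` type are not shown in this hunk, and the key here is freshly generated purely for illustration:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha1"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
)

// spki matches the standard SubjectPublicKeyInfo ASN.1 layout.
type spki struct {
	Algorithm        pkix.AlgorithmIdentifier
	SubjectPublicKey asn1.BitString
}

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		panic(err)
	}

	var info spki
	if _, err := asn1.Unmarshal(der, &info); err != nil {
		panic(err)
	}

	// RFC 5280 method 1 SKI: SHA-1 over the subjectPublicKey bits.
	ski := sha1.Sum(info.SubjectPublicKey.Bytes) // #nosec G401 -- identifier, not integrity
	fmt.Printf("%x\n", ski)
}
```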
@@ -1,16 +1,17 @@
package main

import (
"context"
"flag"
"io"
"log"
"net"

"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)

func proxy(conn net.Conn, inside string) error {
proxyConn, err := net.Dial("tcp", inside)
proxyConn, err := (&net.Dialer{}).DialContext(context.Background(), "tcp", inside)
if err != nil {
return err
}
@@ -19,7 +20,7 @@ func proxy(conn net.Conn, inside string) error {
defer conn.Close()

go func() {
io.Copy(conn, proxyConn)
_, _ = io.Copy(conn, proxyConn)
}()
_, err = io.Copy(proxyConn, conn)
return err
@@ -31,16 +32,22 @@ func main() {
flag.StringVar(&inside, "p", "4000", "inside port")
flag.Parse()

l, err := net.Listen("tcp", "0.0.0.0:"+outside)
lc := &net.ListenConfig{}
l, err := lc.Listen(context.Background(), "tcp", "0.0.0.0:"+outside)
die.If(err)

for {
conn, err := l.Accept()
var conn net.Conn
conn, err = l.Accept()
if err != nil {
log.Println(err)
_, _ = lib.Warn(err, "accept failed")
continue
}

go proxy(conn, "127.0.0.1:"+inside)
go func() {
if err = proxy(conn, "127.0.0.1:"+inside); err != nil {
_, _ = lib.Warn(err, "proxy error")
}
}()
}
}
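For context, a compact standalone sketch of the bidirectional-copy pattern this proxy relies on, with both directions awaited and the connection passed into the goroutine explicitly. The addresses are placeholders, not the tool's defaults:

```go
package main

import (
	"context"
	"io"
	"log"
	"net"
)

// pipe copies both directions and returns once either side closes.
func pipe(a, b net.Conn) {
	done := make(chan struct{}, 2)
	go func() { _, _ = io.Copy(a, b); done <- struct{}{} }()
	go func() { _, _ = io.Copy(b, a); done <- struct{}{} }()
	<-done
	_ = a.Close()
	_ = b.Close()
}

func main() {
	lc := &net.ListenConfig{}
	l, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:4001")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Println(err)
			continue
		}
		go func(c net.Conn) {
			upstream, err := (&net.Dialer{}).DialContext(context.Background(), "tcp", "127.0.0.1:4000")
			if err != nil {
				log.Println(err)
				_ = c.Close()
				return
			}
			pipe(c, upstream)
		}(conn)
	}
}
```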
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
@@ -8,7 +9,6 @@ import (
|
||||
"encoding/pem"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
cfg := &tls.Config{}
|
||||
cfg := &tls.Config{} // #nosec G402
|
||||
|
||||
var sysRoot, listenAddr, certFile, keyFile string
|
||||
var verify bool
|
||||
@@ -47,7 +47,8 @@ func main() {
|
||||
}
|
||||
cfg.Certificates = append(cfg.Certificates, cert)
|
||||
if sysRoot != "" {
|
||||
pemList, err := ioutil.ReadFile(sysRoot)
|
||||
var pemList []byte
|
||||
pemList, err = os.ReadFile(sysRoot)
|
||||
die.If(err)
|
||||
|
||||
roots := x509.NewCertPool()
|
||||
@@ -59,48 +60,54 @@ func main() {
|
||||
cfg.RootCAs = roots
|
||||
}
|
||||
|
||||
l, err := net.Listen("tcp", listenAddr)
|
||||
lc := &net.ListenConfig{}
|
||||
l, err := lc.Listen(context.Background(), "tcp", listenAddr)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
var conn net.Conn
|
||||
conn, err = l.Accept()
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
|
||||
raddr := conn.RemoteAddr()
|
||||
tconn := tls.Server(conn, cfg)
|
||||
err = tconn.Handshake()
|
||||
if err != nil {
|
||||
fmt.Printf("[+] %v: failed to complete handshake: %v\n", raddr, err)
|
||||
continue
|
||||
}
|
||||
cs := tconn.ConnectionState()
|
||||
if len(cs.PeerCertificates) == 0 {
|
||||
fmt.Printf("[+] %v: no chain presented\n", raddr)
|
||||
continue
|
||||
}
|
||||
|
||||
var chain []byte
|
||||
for _, cert := range cs.PeerCertificates {
|
||||
p := &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
}
|
||||
chain = append(chain, pem.EncodeToMemory(p)...)
|
||||
}
|
||||
|
||||
var nonce [16]byte
|
||||
_, err = rand.Read(nonce[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fname := fmt.Sprintf("%v-%v.pem", raddr, hex.EncodeToString(nonce[:]))
|
||||
err = ioutil.WriteFile(fname, chain, 0644)
|
||||
die.If(err)
|
||||
fmt.Printf("%v: [+] wrote %v.\n", raddr, fname)
|
||||
handleConn(conn, cfg)
|
||||
}
|
||||
}
|
||||
|
||||
// handleConn performs a TLS handshake, extracts the peer chain, and writes it to a file.
|
||||
func handleConn(conn net.Conn, cfg *tls.Config) {
|
||||
defer conn.Close()
|
||||
raddr := conn.RemoteAddr()
|
||||
tconn := tls.Server(conn, cfg)
|
||||
if err := tconn.HandshakeContext(context.Background()); err != nil {
|
||||
fmt.Printf("[+] %v: failed to complete handshake: %v\n", raddr, err)
|
||||
return
|
||||
}
|
||||
cs := tconn.ConnectionState()
|
||||
if len(cs.PeerCertificates) == 0 {
|
||||
fmt.Printf("[+] %v: no chain presented\n", raddr)
|
||||
return
|
||||
}
|
||||
|
||||
var chain []byte
|
||||
for _, cert := range cs.PeerCertificates {
|
||||
p := &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
|
||||
chain = append(chain, pem.EncodeToMemory(p)...)
|
||||
}
|
||||
|
||||
var nonce [16]byte
|
||||
if _, err := rand.Read(nonce[:]); err != nil {
|
||||
fmt.Printf("[+] %v: failed to generate filename nonce: %v\n", raddr, err)
|
||||
return
|
||||
}
|
||||
fname := fmt.Sprintf("%v-%v.pem", raddr, hex.EncodeToString(nonce[:]))
|
||||
if err := os.WriteFile(fname, chain, 0o644); err != nil {
|
||||
fmt.Printf("[+] %v: failed to write %v: %v\n", raddr, fname, err)
|
||||
return
|
||||
}
|
||||
fmt.Printf("%v: [+] wrote %v.\n", raddr, fname)
|
||||
}
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
var cfg = &tls.Config{}
|
||||
var cfg = &tls.Config{} // #nosec G402
|
||||
|
||||
var sysRoot, serverName string
|
||||
flag.StringVar(&sysRoot, "ca", "", "provide an alternate CA bundle")
|
||||
@@ -23,7 +23,7 @@ func main() {
|
||||
flag.Parse()
|
||||
|
||||
if sysRoot != "" {
|
||||
pemList, err := ioutil.ReadFile(sysRoot)
|
||||
pemList, err := os.ReadFile(sysRoot)
|
||||
die.If(err)
|
||||
|
||||
roots := x509.NewCertPool()
|
||||
@@ -44,10 +44,13 @@ func main() {
|
||||
if err != nil {
|
||||
site += ":443"
|
||||
}
|
||||
conn, err := tls.Dial("tcp", site, cfg)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
os.Exit(1)
|
||||
d := &tls.Dialer{Config: cfg}
|
||||
nc, err := d.DialContext(context.Background(), "tcp", site)
|
||||
die.If(err)
|
||||
|
||||
conn, ok := nc.(*tls.Conn)
|
||||
if !ok {
|
||||
die.With("invalid TLS connection (not a *tls.Conn)")
|
||||
}
|
||||
|
||||
cs := conn.ConnectionState()
|
||||
@@ -61,8 +64,9 @@ func main() {
|
||||
chain = append(chain, pem.EncodeToMemory(p)...)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(site+".pem", chain, 0644)
|
||||
err = os.WriteFile(site+".pem", chain, 0644)
|
||||
die.If(err)
|
||||
|
||||
fmt.Printf("[+] wrote %s.pem.\n", site)
|
||||
}
|
||||
}
|
||||
@@ -60,7 +60,7 @@ func printDigests(paths []string, issuer bool) {
|
||||
for _, path := range paths {
|
||||
cert, err := certlib.LoadCertificate(path)
|
||||
if err != nil {
|
||||
lib.Warn(err, "failed to load certificate from %s", path)
|
||||
_, _ = lib.Warn(err, "failed to load certificate from %s", path)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -75,20 +75,19 @@ func matchDigests(paths []string, issuer bool) {
|
||||
}
|
||||
|
||||
var invalid int
|
||||
for {
|
||||
if len(paths) == 0 {
|
||||
break
|
||||
}
|
||||
for len(paths) > 0 {
|
||||
fst := paths[0]
|
||||
snd := paths[1]
|
||||
paths = paths[2:]
|
||||
|
||||
fstCert, err := certlib.LoadCertificate(fst)
|
||||
die.If(err)
|
||||
|
||||
sndCert, err := certlib.LoadCertificate(snd)
|
||||
die.If(err)
|
||||
|
||||
if !bytes.Equal(getSubjectInfoHash(fstCert, issuer), getSubjectInfoHash(sndCert, issuer)) {
|
||||
lib.Warnx("certificates don't match: %s and %s", fst, snd)
|
||||
_, _ = lib.Warnx("certificates don't match: %s and %s", fst, snd)
|
||||
invalid++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/hosts"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -13,16 +17,23 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
hostPort := os.Args[1]
|
||||
conn, err := tls.Dial("tcp", hostPort, &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
})
|
||||
hostPort, err := hosts.ParseHost(os.Args[1])
|
||||
die.If(err)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to connect to the TLS server: %v\n", err)
|
||||
os.Exit(1)
|
||||
d := &tls.Dialer{Config: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}} // #nosec G402
|
||||
|
||||
nc, err := d.DialContext(context.Background(), "tcp", hostPort.String())
|
||||
die.If(err)
|
||||
|
||||
conn, ok := nc.(*tls.Conn)
|
||||
if !ok {
|
||||
die.With("invalid TLS connection (not a *tls.Conn)")
|
||||
}
|
||||
|
||||
defer conn.Close()
|
||||
|
||||
state := conn.ConnectionState()
|
||||
printConnectionDetails(state)
|
||||
}
|
||||
@@ -37,7 +48,6 @@ func printConnectionDetails(state tls.ConnectionState) {
|
||||
|
||||
func tlsVersion(version uint16) string {
|
||||
switch version {
|
||||
|
||||
case tls.VersionTLS13:
|
||||
return "TLS 1.3"
|
||||
case tls.VersionTLS12:
|
||||
|
||||
@@ -11,10 +11,9 @@ import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
@@ -32,7 +31,7 @@ const (
|
||||
curveP521
|
||||
)
|
||||
|
||||
func getECCurve(pub interface{}) int {
|
||||
func getECCurve(pub any) int {
|
||||
switch pub := pub.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
switch pub.Curve {
|
||||
@@ -52,42 +51,88 @@ func getECCurve(pub interface{}) int {
|
||||
}
|
||||
}
|
||||
|
||||
// matchRSA compares the RSA public key from the certificate against the RSA public key from the private key.
// It returns true on match.
|
||||
func matchRSA(certPub *rsa.PublicKey, keyPub *rsa.PublicKey) bool {
|
||||
return keyPub.N.Cmp(certPub.N) == 0 && keyPub.E == certPub.E
|
||||
}
|
||||
|
||||
// matchECDSA compares ECDSA public keys for equality and compatible curve.
|
||||
// It returns match=true when they are on the same curve and have the same X/Y.
|
||||
// If curves mismatch, match is false.
|
||||
func matchECDSA(certPub *ecdsa.PublicKey, keyPub *ecdsa.PublicKey) bool {
|
||||
if getECCurve(certPub) != getECCurve(keyPub) {
|
||||
return false
|
||||
}
|
||||
if keyPub.X.Cmp(certPub.X) != 0 {
|
||||
return false
|
||||
}
|
||||
if keyPub.Y.Cmp(certPub.Y) != 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// matchKeys determines whether the certificate's public key matches the given private key.
|
||||
// It returns true if they match; otherwise, it returns false and a human-friendly reason.
|
||||
func matchKeys(cert *x509.Certificate, priv crypto.Signer) (bool, string) {
|
||||
switch keyPub := priv.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
switch certPub := cert.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
if matchRSA(certPub, keyPub) {
|
||||
return true, ""
|
||||
}
|
||||
return false, "public keys don't match"
|
||||
case *ecdsa.PublicKey:
|
||||
return false, "RSA private key, EC public key"
|
||||
default:
|
||||
return false, fmt.Sprintf("unsupported certificate public key type: %T", cert.PublicKey)
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
switch certPub := cert.PublicKey.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
if matchECDSA(certPub, keyPub) {
|
||||
return true, ""
|
||||
}
|
||||
// Determine a more precise reason
|
||||
kc := getECCurve(keyPub)
|
||||
cc := getECCurve(certPub)
|
||||
if kc == curveInvalid {
|
||||
return false, "invalid private key curve"
|
||||
}
|
||||
if cc == curveRSA {
|
||||
return false, "private key is EC, certificate is RSA"
|
||||
}
|
||||
if kc != cc {
|
||||
return false, "EC curves don't match"
|
||||
}
|
||||
return false, "public keys don't match"
|
||||
case *rsa.PublicKey:
|
||||
return false, "private key is EC, certificate is RSA"
|
||||
default:
|
||||
return false, fmt.Sprintf("unsupported certificate public key type: %T", cert.PublicKey)
|
||||
}
|
||||
default:
|
||||
return false, fmt.Sprintf("unrecognised private key type: %T", priv.Public())
|
||||
}
|
||||
}
|
||||
|
||||
func loadKey(path string) (crypto.Signer, error) {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
in, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
in = bytes.TrimSpace(in)
|
||||
p, _ := pem.Decode(in)
|
||||
if p != nil {
|
||||
if p, _ := pem.Decode(in); p != nil {
|
||||
if !validPEMs[p.Type] {
|
||||
return nil, errors.New("invalid private key file type " + p.Type)
|
||||
}
|
||||
in = p.Bytes
|
||||
return certlib.ParsePrivateKeyPEM(in)
|
||||
}
|
||||
|
||||
priv, err := x509.ParsePKCS8PrivateKey(in)
|
||||
if err != nil {
|
||||
priv, err = x509.ParsePKCS1PrivateKey(in)
|
||||
if err != nil {
|
||||
priv, err = x509.ParseECPrivateKey(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return priv.(*rsa.PrivateKey), nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return priv.(*ecdsa.PrivateKey), nil
|
||||
}
|
||||
|
||||
// should never reach here
|
||||
return nil, errors.New("invalid private key")
|
||||
|
||||
return certlib.ParsePrivateKeyDER(in)
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -96,7 +141,7 @@ func main() {
|
||||
flag.StringVar(&certFile, "c", "", "TLS `certificate` file")
|
||||
flag.Parse()
|
||||
|
||||
in, err := ioutil.ReadFile(certFile)
|
||||
in, err := os.ReadFile(certFile)
|
||||
die.If(err)
|
||||
|
||||
p, _ := pem.Decode(in)
|
||||
@@ -112,50 +157,11 @@ func main() {
|
||||
priv, err := loadKey(keyFile)
|
||||
die.If(err)
|
||||
|
||||
switch pub := priv.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
switch certPub := cert.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
if pub.N.Cmp(certPub.N) != 0 || pub.E != certPub.E {
|
||||
fmt.Println("No match (public keys don't match).")
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("Match.")
|
||||
return
|
||||
case *ecdsa.PublicKey:
|
||||
fmt.Println("No match (RSA private key, EC public key).")
|
||||
os.Exit(1)
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
privCurve := getECCurve(pub)
|
||||
certCurve := getECCurve(cert.PublicKey)
|
||||
log.Printf("priv: %d\tcert: %d\n", privCurve, certCurve)
|
||||
|
||||
if certCurve == curveRSA {
|
||||
fmt.Println("No match (private key is EC, certificate is RSA).")
|
||||
os.Exit(1)
|
||||
} else if privCurve == curveInvalid {
|
||||
fmt.Println("No match (invalid private key curve).")
|
||||
os.Exit(1)
|
||||
} else if privCurve != certCurve {
|
||||
fmt.Println("No match (EC curves don't match).")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
certPub := cert.PublicKey.(*ecdsa.PublicKey)
|
||||
if pub.X.Cmp(certPub.X) != 0 {
|
||||
fmt.Println("No match (public keys don't match).")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if pub.Y.Cmp(certPub.Y) != 0 {
|
||||
fmt.Println("No match (public keys don't match).")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
matched, reason := matchKeys(cert, priv)
|
||||
if matched {
|
||||
fmt.Println("Match.")
|
||||
default:
|
||||
fmt.Printf("Unrecognised private key type: %T\n", priv.Public())
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
fmt.Printf("No match (%s).\n", reason)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@@ -201,10 +201,6 @@ func init() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if fromLoc == time.UTC {
|
||||
|
||||
}
|
||||
|
||||
toLoc = time.UTC
|
||||
}
|
||||
|
||||
@@ -257,15 +253,16 @@ func main() {
|
||||
showTime(time.Now())
|
||||
os.Exit(0)
|
||||
case 1:
|
||||
if flag.Arg(0) == "-" {
|
||||
switch {
|
||||
case flag.Arg(0) == "-":
|
||||
s := bufio.NewScanner(os.Stdin)
|
||||
|
||||
for s.Scan() {
|
||||
times = append(times, s.Text())
|
||||
}
|
||||
} else if flag.Arg(0) == "help" {
|
||||
case flag.Arg(0) == "help":
|
||||
usageExamples()
|
||||
} else {
|
||||
default:
|
||||
times = flag.Args()
|
||||
}
|
||||
default:
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
@@ -12,9 +11,8 @@ import (
|
||||
|
||||
type empty struct{}
|
||||
|
||||
func errorf(format string, args ...interface{}) {
|
||||
format += "\n"
|
||||
fmt.Fprintf(os.Stderr, format, args...)
|
||||
func errorf(path string, err error) {
|
||||
fmt.Fprintf(os.Stderr, "%s FAILED: %s\n", path, err)
|
||||
}
|
||||
|
||||
func usage(w io.Writer) {
|
||||
@@ -44,16 +42,16 @@ func main() {
|
||||
|
||||
if flag.NArg() == 1 && flag.Arg(0) == "-" {
|
||||
path := "stdin"
|
||||
in, err := ioutil.ReadAll(os.Stdin)
|
||||
in, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
errorf("%s FAILED: %s", path, err)
|
||||
errorf(path, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var e empty
|
||||
err = yaml.Unmarshal(in, &e)
|
||||
if err != nil {
|
||||
errorf("%s FAILED: %s", path, err)
|
||||
errorf(path, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@@ -65,16 +63,16 @@ func main() {
|
||||
}
|
||||
|
||||
for _, path := range flag.Args() {
|
||||
in, err := ioutil.ReadFile(path)
|
||||
in, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
errorf("%s FAILED: %s", path, err)
|
||||
errorf(path, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var e empty
|
||||
err = yaml.Unmarshal(in, &e)
|
||||
if err != nil {
|
||||
errorf("%s FAILED: %s", path, err)
|
||||
errorf(path, err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -14,16 +14,16 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
const defaultDirectory = ".git/objects"
|
||||
|
||||
func errorf(format string, a ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, format, a...)
|
||||
if format[len(format)-1] != '\n' {
|
||||
fmt.Fprintf(os.Stderr, "\n")
|
||||
}
|
||||
}
|
||||
// maxDecompressedSize limits how many bytes we will decompress from a zlib
|
||||
// stream to mitigate decompression bombs (gosec G110).
|
||||
// Increase this if you expect larger objects.
|
||||
const maxDecompressedSize int64 = 64 << 30 // 64 GiB
|
||||
|
||||
func isDir(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
@@ -48,17 +48,21 @@ func loadFile(path string) ([]byte, error) {
|
||||
}
|
||||
defer zread.Close()
|
||||
|
||||
_, err = io.Copy(buf, zread)
|
||||
if err != nil {
|
||||
// Protect against decompression bombs by limiting how much we read.
|
||||
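// Reading maxDecompressedSize+1 bytes through the LimitReader lets the size
// check below distinguish "stream exceeds the cap" from a stream that is
// exactly at the limit, instead of silently truncating the output.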
lr := io.LimitReader(zread, maxDecompressedSize+1)
|
||||
if _, err = buf.ReadFrom(lr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if int64(buf.Len()) > maxDecompressedSize {
|
||||
return nil, fmt.Errorf("decompressed size exceeds limit (%d bytes)", maxDecompressedSize)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func showFile(path string) {
|
||||
fileData, err := loadFile(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
lib.Warn(err, "failed to load %s", path)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -68,39 +72,71 @@ func showFile(path string) {
|
||||
func searchFile(path string, search *regexp.Regexp) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
lib.Warn(err, "failed to open %s", path)
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
zread, err := zlib.NewReader(file)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
lib.Warn(err, "failed to decompress %s", path)
|
||||
return err
|
||||
}
|
||||
defer zread.Close()
|
||||
|
||||
zbuf := bufio.NewReader(zread)
|
||||
if search.MatchReader(zbuf) {
|
||||
fileData, err := loadFile(path)
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return err
|
||||
}
|
||||
fmt.Printf("%s:\n%s\n", path, fileData)
|
||||
// Limit how much we scan to avoid DoS via huge decompression.
|
||||
lr := io.LimitReader(zread, maxDecompressedSize+1)
|
||||
zbuf := bufio.NewReader(lr)
|
||||
if !search.MatchReader(zbuf) {
|
||||
return nil
|
||||
}
|
||||
|
||||
fileData, err := loadFile(path)
|
||||
if err != nil {
|
||||
lib.Warn(err, "failed to load %s", path)
|
||||
return err
|
||||
}
|
||||
fmt.Printf("%s:\n%s\n", path, fileData)
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildWalker(searchExpr *regexp.Regexp) filepath.WalkFunc {
|
||||
return func(path string, info os.FileInfo, err error) error {
|
||||
if info.Mode().IsRegular() {
|
||||
return searchFile(path, searchExpr)
|
||||
return func(path string, info os.FileInfo, _ error) error {
|
||||
if !info.Mode().IsRegular() {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
return searchFile(path, searchExpr)
|
||||
}
|
||||
}
|
||||
|
||||
// runSearch compiles the search expression and processes the provided paths.
|
||||
// It returns an error for fatal conditions; per-file errors are logged.
|
||||
func runSearch(expr string) error {
|
||||
search, err := regexp.Compile(expr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid regexp: %w", err)
|
||||
}
|
||||
|
||||
pathList := flag.Args()
|
||||
if len(pathList) == 0 {
|
||||
pathList = []string{defaultDirectory}
|
||||
}
|
||||
|
||||
for _, path := range pathList {
|
||||
if isDir(path) {
|
||||
if err2 := filepath.Walk(path, buildWalker(search)); err2 != nil {
|
||||
return err2
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err2 := searchFile(path, search); err2 != nil {
|
||||
// Non-fatal: keep going, but report it.
|
||||
lib.Warn(err2, "non-fatal error while searching files")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flSearch := flag.String("s", "", "search string (should be an RE2 regular expression)")
|
||||
flag.Parse()
|
||||
@@ -109,28 +145,10 @@ func main() {
|
||||
for _, path := range flag.Args() {
|
||||
showFile(path)
|
||||
}
|
||||
} else {
|
||||
search, err := regexp.Compile(*flSearch)
|
||||
if err != nil {
|
||||
errorf("Bad regexp: %v", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
pathList := flag.Args()
|
||||
if len(pathList) == 0 {
|
||||
pathList = []string{defaultDirectory}
|
||||
}
|
||||
|
||||
for _, path := range pathList {
|
||||
if isDir(path) {
|
||||
err := filepath.Walk(path, buildWalker(search))
|
||||
if err != nil {
|
||||
errorf("%v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
searchFile(path, search)
|
||||
}
|
||||
}
|
||||
if err := runSearch(*flSearch); err != nil {
|
||||
lib.Err(lib.ExitFailure, err, "failed to run search")
|
||||
}
|
||||
}
|
||||
|
||||
4
go.mod
@@ -5,7 +5,6 @@ go 1.24.0
|
||||
require (
|
||||
github.com/hashicorp/go-syslog v1.0.0
|
||||
github.com/kr/text v0.2.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pkg/sftp v1.12.0
|
||||
golang.org/x/crypto v0.44.0
|
||||
golang.org/x/sys v0.38.0
|
||||
@@ -13,12 +12,15 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/benbjohnson/clock v1.3.5
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/google/certificate-transparency-go v1.0.21
|
||||
rsc.io/qr v0.2.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
)
|
||||
|
||||
10
go.sum
@@ -1,3 +1,5 @@
|
||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
@@ -25,19 +27,15 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU=
|
||||
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
|
||||
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
@@ -46,3 +44,5 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
|
||||
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build freebsd darwin,386 netbsd
|
||||
//go:build bsd
|
||||
|
||||
package lib
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build unix linux openbsd darwin,amd64
|
||||
//go:build unix || linux || openbsd || (darwin && amd64)
|
||||
|
||||
package lib
|
||||
|
||||
@@ -18,7 +18,7 @@ type FileTime struct {
|
||||
|
||||
func timeSpecToTime(ts unix.Timespec) time.Time {
|
||||
// The casts to int64 are needed because on 386, these are int32s.
|
||||
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
|
||||
return time.Unix(ts.Sec, ts.Nsec)
|
||||
}
|
||||
|
||||
// LoadFileTime returns a FileTime associated with the file.
|
||||
|
||||
161
lib/lib.go
@@ -2,14 +2,22 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var progname = filepath.Base(os.Args[0])
|
||||
|
||||
const (
|
||||
daysInYear = 365
|
||||
digitWidth = 10
|
||||
hoursInQuarterDay = 6
|
||||
)
|
||||
|
||||
// ProgName returns what lib thinks the program name is, namely the
|
||||
// basename of argv0.
|
||||
//
|
||||
@@ -20,7 +28,7 @@ func ProgName() string {
|
||||
|
||||
// Warnx displays a formatted error message to standard error, à la
|
||||
// warnx(3).
|
||||
func Warnx(format string, a ...interface{}) (int, error) {
|
||||
func Warnx(format string, a ...any) (int, error) {
|
||||
format = fmt.Sprintf("[%s] %s", progname, format)
|
||||
format += "\n"
|
||||
return fmt.Fprintf(os.Stderr, format, a...)
|
||||
@@ -28,7 +36,7 @@ func Warnx(format string, a ...interface{}) (int, error) {
|
||||
|
||||
// Warn displays a formatted error message to standard output,
|
||||
// appending the error string, à la warn(3).
|
||||
func Warn(err error, format string, a ...interface{}) (int, error) {
|
||||
func Warn(err error, format string, a ...any) (int, error) {
|
||||
format = fmt.Sprintf("[%s] %s", progname, format)
|
||||
format += ": %v\n"
|
||||
a = append(a, err)
|
||||
@@ -37,7 +45,7 @@ func Warn(err error, format string, a ...interface{}) (int, error) {
|
||||
|
||||
// Errx displays a formatted error message to standard error and exits
|
||||
// with the status code from `exit`, à la errx(3).
|
||||
func Errx(exit int, format string, a ...interface{}) {
|
||||
func Errx(exit int, format string, a ...any) {
|
||||
format = fmt.Sprintf("[%s] %s", progname, format)
|
||||
format += "\n"
|
||||
fmt.Fprintf(os.Stderr, format, a...)
|
||||
@@ -47,7 +55,7 @@ func Errx(exit int, format string, a ...interface{}) {
|
||||
// Err displays a formatting error message to standard error,
|
||||
// appending the error string, and exits with the status code from
|
||||
// `exit`, à la err(3).
|
||||
func Err(exit int, err error, format string, a ...interface{}) {
|
||||
func Err(exit int, err error, format string, a ...any) {
|
||||
format = fmt.Sprintf("[%s] %s", progname, format)
|
||||
format += ": %v\n"
|
||||
a = append(a, err)
|
||||
@@ -62,30 +70,30 @@ func Itoa(i int, wid int) string {
|
||||
// Assemble decimal in reverse order.
|
||||
var b [20]byte
|
||||
bp := len(b) - 1
|
||||
for i >= 10 || wid > 1 {
|
||||
for i >= digitWidth || wid > 1 {
|
||||
wid--
|
||||
q := i / 10
|
||||
b[bp] = byte('0' + i - q*10)
|
||||
q := i / digitWidth
|
||||
b[bp] = byte('0' + i - q*digitWidth)
|
||||
bp--
|
||||
i = q
|
||||
}
|
||||
// i < 10
|
||||
|
||||
b[bp] = byte('0' + i)
|
||||
return string(b[bp:])
|
||||
}
|
||||
|
||||
var (
|
||||
dayDuration = 24 * time.Hour
|
||||
yearDuration = (365 * dayDuration) + (6 * time.Hour)
|
||||
yearDuration = (daysInYear * dayDuration) + (hoursInQuarterDay * time.Hour)
|
||||
)
|
||||
|
||||
// Duration returns a prettier string for time.Durations.
|
||||
func Duration(d time.Duration) string {
|
||||
var s string
|
||||
if d >= yearDuration {
|
||||
years := d / yearDuration
|
||||
years := int64(d / yearDuration)
|
||||
s += fmt.Sprintf("%dy", years)
|
||||
d -= years * yearDuration
|
||||
d -= time.Duration(years) * yearDuration
|
||||
}
|
||||
|
||||
if d >= dayDuration {
|
||||
@@ -98,8 +106,135 @@ func Duration(d time.Duration) string {
|
||||
}
|
||||
|
||||
d %= 1 * time.Second
|
||||
hours := d / time.Hour
|
||||
d -= hours * time.Hour
|
||||
hours := int64(d / time.Hour)
|
||||
d -= time.Duration(hours) * time.Hour
|
||||
s += fmt.Sprintf("%dh%s", hours, d)
|
||||
return s
|
||||
}
|
||||
|
||||
type HexEncodeMode uint8
|
||||
|
||||
const (
|
||||
// HexEncodeLower prints the bytes as lowercase hexadecimal.
|
||||
HexEncodeLower HexEncodeMode = iota + 1
|
||||
// HexEncodeUpper prints the bytes as uppercase hexadecimal.
|
||||
HexEncodeUpper
|
||||
// HexEncodeLowerColon prints the bytes as lowercase hexadecimal
|
||||
// with colons between each pair of bytes.
|
||||
HexEncodeLowerColon
|
||||
// HexEncodeUpperColon prints the bytes as uppercase hexadecimal
|
||||
// with colons between each pair of bytes.
|
||||
HexEncodeUpperColon
|
||||
// HexEncodeBytes prints the string as a sequence of []byte.
|
||||
HexEncodeBytes
|
||||
)
|
||||
|
||||
func (m HexEncodeMode) String() string {
|
||||
switch m {
|
||||
case HexEncodeLower:
|
||||
return "lower"
|
||||
case HexEncodeUpper:
|
||||
return "upper"
|
||||
case HexEncodeLowerColon:
|
||||
return "lcolon"
|
||||
case HexEncodeUpperColon:
|
||||
return "ucolon"
|
||||
case HexEncodeBytes:
|
||||
return "bytes"
|
||||
default:
|
||||
panic("invalid hex encode mode")
|
||||
}
|
||||
}
|
||||
|
||||
func ParseHexEncodeMode(s string) HexEncodeMode {
|
||||
switch strings.ToLower(s) {
|
||||
case "lower":
|
||||
return HexEncodeLower
|
||||
case "upper":
|
||||
return HexEncodeUpper
|
||||
case "lcolon":
|
||||
return HexEncodeLowerColon
|
||||
case "ucolon":
|
||||
return HexEncodeUpperColon
|
||||
case "bytes":
|
||||
return HexEncodeBytes
|
||||
}
|
||||
|
||||
panic("invalid hex encode mode")
|
||||
}
|
||||
|
||||
func hexColons(s string) string {
|
||||
if len(s)%2 != 0 {
|
||||
fmt.Fprintf(os.Stderr, "hex string: %s\n", s)
|
||||
fmt.Fprintf(os.Stderr, "hex length: %d\n", len(s))
|
||||
panic("invalid hex string length")
|
||||
}
|
||||
|
||||
n := len(s)
|
||||
if n <= 2 {
|
||||
return s
|
||||
}
|
||||
|
||||
pairCount := n / 2
|
||||
if n%2 != 0 {
|
||||
pairCount++
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(n + pairCount - 1)
|
||||
|
||||
for i := 0; i < n; i += 2 {
|
||||
b.WriteByte(s[i])
|
||||
|
||||
if i+1 < n {
|
||||
b.WriteByte(s[i+1])
|
||||
}
|
||||
|
||||
if i+2 < n {
|
||||
b.WriteByte(':')
|
||||
}
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func hexEncode(b []byte) string {
|
||||
s := hex.EncodeToString(b)
|
||||
|
||||
if len(s)%2 != 0 {
|
||||
s = "0" + s
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func bytesAsByteSliceString(buf []byte) string {
|
||||
sb := &strings.Builder{}
|
||||
sb.WriteString("[]byte{")
|
||||
for i := range buf {
|
||||
fmt.Fprintf(sb, "0x%02x, ", buf[i])
|
||||
}
|
||||
sb.WriteString("}")
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// HexEncode encodes the given bytes as a hexadecimal string.
|
||||
func HexEncode(b []byte, mode HexEncodeMode) string {
|
||||
str := hexEncode(b)
|
||||
|
||||
switch mode {
|
||||
case HexEncodeLower:
|
||||
return str
|
||||
case HexEncodeUpper:
|
||||
return strings.ToUpper(str)
|
||||
case HexEncodeLowerColon:
|
||||
return hexColons(str)
|
||||
case HexEncodeUpperColon:
|
||||
return strings.ToUpper(hexColons(str))
|
||||
case HexEncodeBytes:
|
||||
return bytesAsByteSliceString(b)
|
||||
default:
|
||||
panic("invalid hex encode mode")
|
||||
}
|
||||
}
|
||||
|
||||
79
lib/lib_test.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package lib_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func TestHexEncode_LowerUpper(t *testing.T) {
|
||||
b := []byte{0x0f, 0xa1, 0x00, 0xff}
|
||||
|
||||
gotLower := lib.HexEncode(b, lib.HexEncodeLower)
|
||||
if gotLower != "0fa100ff" {
|
||||
t.Fatalf("lib.HexEncode lower: expected %q, got %q", "0fa100ff", gotLower)
|
||||
}
|
||||
|
||||
gotUpper := lib.HexEncode(b, lib.HexEncodeUpper)
|
||||
if gotUpper != "0FA100FF" {
|
||||
t.Fatalf("lib.HexEncode upper: expected %q, got %q", "0FA100FF", gotUpper)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexEncode_ColonModes(t *testing.T) {
|
||||
// Includes leading zero nibble and a zero byte to verify padding and separators
|
||||
b := []byte{0x0f, 0xa1, 0x00, 0xff}
|
||||
|
||||
gotLColon := lib.HexEncode(b, lib.HexEncodeLowerColon)
|
||||
if gotLColon != "0f:a1:00:ff" {
|
||||
t.Fatalf("lib.HexEncode colon lower: expected %q, got %q", "0f:a1:00:ff", gotLColon)
|
||||
}
|
||||
|
||||
gotUColon := lib.HexEncode(b, lib.HexEncodeUpperColon)
|
||||
if gotUColon != "0F:A1:00:FF" {
|
||||
t.Fatalf("lib.HexEncode colon upper: expected %q, got %q", "0F:A1:00:FF", gotUColon)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexEncode_EmptyInput(t *testing.T) {
|
||||
var b []byte
|
||||
if got := lib.HexEncode(b, lib.HexEncodeLower); got != "" {
|
||||
t.Fatalf("empty lower: expected empty string, got %q", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "" {
|
||||
t.Fatalf("empty upper: expected empty string, got %q", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "" {
|
||||
t.Fatalf("empty colon lower: expected empty string, got %q", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "" {
|
||||
t.Fatalf("empty colon upper: expected empty string, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexEncode_SingleByte(t *testing.T) {
|
||||
b := []byte{0x0f}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeLower); got != "0f" {
|
||||
t.Fatalf("single byte lower: expected %q, got %q", "0f", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "0F" {
|
||||
t.Fatalf("single byte upper: expected %q, got %q", "0F", got)
|
||||
}
|
||||
// For a single byte, colon modes should not introduce separators
|
||||
if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "0f" {
|
||||
t.Fatalf("single byte colon lower: expected %q, got %q", "0f", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "0F" {
|
||||
t.Fatalf("single byte colon upper: expected %q, got %q", "0F", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexEncode_InvalidModePanics(t *testing.T) {
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Fatalf("expected panic for invalid mode, but function returned normally")
|
||||
}
|
||||
}()
|
||||
// 0 is not a valid lib.HexEncodeMode (valid modes start at 1)
|
||||
_ = lib.HexEncode([]byte{0x01}, lib.HexEncodeMode(0))
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Package syslog is a syslog-type facility for logging.
|
||||
// Package log is a syslog-type facility for logging.
|
||||
package log
|
||||
|
||||
import (
|
||||
@@ -17,7 +17,7 @@ type logger struct {
|
||||
writeConsole bool
|
||||
}
|
||||
|
||||
func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
|
||||
func (log *logger) printf(p gsyslog.Priority, format string, args ...any) {
|
||||
if !strings.HasSuffix(format, "\n") {
|
||||
format += "\n"
|
||||
}
|
||||
@@ -28,33 +28,33 @@ func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}
|
||||
}
|
||||
|
||||
if log.l != nil {
|
||||
log.l.WriteLevel(p, []byte(fmt.Sprintf(format, args...)))
|
||||
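// fmt.Appendf formats directly into a byte slice, avoiding the intermediate
// string allocation; the WriteLevel error is deliberately discarded.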
_ = log.l.WriteLevel(p, fmt.Appendf(nil, format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (log *logger) print(p gsyslog.Priority, args ...interface{}) {
|
||||
func (log *logger) print(p gsyslog.Priority, args ...any) {
|
||||
if p <= log.p && log.writeConsole {
|
||||
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
|
||||
fmt.Print(args...)
|
||||
}
|
||||
|
||||
if log.l != nil {
|
||||
log.l.WriteLevel(p, []byte(fmt.Sprint(args...)))
|
||||
_ = log.l.WriteLevel(p, fmt.Append(nil, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (log *logger) println(p gsyslog.Priority, args ...interface{}) {
|
||||
func (log *logger) println(p gsyslog.Priority, args ...any) {
|
||||
if p <= log.p && log.writeConsole {
|
||||
fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
|
||||
fmt.Println(args...)
|
||||
}
|
||||
|
||||
if log.l != nil {
|
||||
log.l.WriteLevel(p, []byte(fmt.Sprintln(args...)))
|
||||
_ = log.l.WriteLevel(p, fmt.Appendln(nil, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (log *logger) spew(args ...interface{}) {
|
||||
func (log *logger) spew(args ...any) {
|
||||
if log.p == gsyslog.LOG_DEBUG {
|
||||
spew.Dump(args...)
|
||||
}
|
||||
@@ -160,109 +160,109 @@ func Setup(opts *Options) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Debug(args ...interface{}) {
|
||||
func Debug(args ...any) {
|
||||
log.print(gsyslog.LOG_DEBUG, args...)
|
||||
}
|
||||
|
||||
func Info(args ...interface{}) {
|
||||
func Info(args ...any) {
|
||||
log.print(gsyslog.LOG_INFO, args...)
|
||||
}
|
||||
|
||||
func Notice(args ...interface{}) {
|
||||
func Notice(args ...any) {
|
||||
log.print(gsyslog.LOG_NOTICE, args...)
|
||||
}
|
||||
|
||||
func Warning(args ...interface{}) {
|
||||
func Warning(args ...any) {
|
||||
log.print(gsyslog.LOG_WARNING, args...)
|
||||
}
|
||||
|
||||
func Err(args ...interface{}) {
|
||||
func Err(args ...any) {
|
||||
log.print(gsyslog.LOG_ERR, args...)
|
||||
}
|
||||
|
||||
func Crit(args ...interface{}) {
|
||||
func Crit(args ...any) {
|
||||
log.print(gsyslog.LOG_CRIT, args...)
|
||||
}
|
||||
|
||||
func Alert(args ...interface{}) {
|
||||
func Alert(args ...any) {
|
||||
log.print(gsyslog.LOG_ALERT, args...)
|
||||
}
|
||||
|
||||
func Emerg(args ...interface{}) {
|
||||
func Emerg(args ...any) {
|
||||
log.print(gsyslog.LOG_EMERG, args...)
|
||||
}
|
||||
|
||||
func Debugln(args ...interface{}) {
|
||||
func Debugln(args ...any) {
|
||||
log.println(gsyslog.LOG_DEBUG, args...)
|
||||
}
|
||||
|
||||
func Infoln(args ...interface{}) {
|
||||
func Infoln(args ...any) {
|
||||
log.println(gsyslog.LOG_INFO, args...)
|
||||
}
|
||||
|
||||
func Noticeln(args ...interface{}) {
|
||||
func Noticeln(args ...any) {
|
||||
log.println(gsyslog.LOG_NOTICE, args...)
|
||||
}
|
||||
|
||||
func Warningln(args ...interface{}) {
|
||||
func Warningln(args ...any) {
|
||||
log.print(gsyslog.LOG_WARNING, args...)
|
||||
}
|
||||
|
||||
func Errln(args ...interface{}) {
|
||||
func Errln(args ...any) {
|
||||
log.println(gsyslog.LOG_ERR, args...)
|
||||
}
|
||||
|
||||
func Critln(args ...interface{}) {
|
||||
func Critln(args ...any) {
|
||||
log.println(gsyslog.LOG_CRIT, args...)
|
||||
}
|
||||
|
||||
func Alertln(args ...interface{}) {
|
||||
func Alertln(args ...any) {
|
||||
log.println(gsyslog.LOG_ALERT, args...)
|
||||
}
|
||||
|
||||
func Emergln(args ...interface{}) {
|
||||
func Emergln(args ...any) {
|
||||
log.println(gsyslog.LOG_EMERG, args...)
|
||||
}
|
||||
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
func Debugf(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_DEBUG, format, args...)
|
||||
}
|
||||
|
||||
func Infof(format string, args ...interface{}) {
|
||||
func Infof(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_INFO, format, args...)
|
||||
}
|
||||
|
||||
func Noticef(format string, args ...interface{}) {
|
||||
func Noticef(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_NOTICE, format, args...)
|
||||
}
|
||||
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
func Warningf(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_WARNING, format, args...)
|
||||
}
|
||||
|
||||
func Errf(format string, args ...interface{}) {
|
||||
func Errf(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_ERR, format, args...)
|
||||
}
|
||||
|
||||
func Critf(format string, args ...interface{}) {
|
||||
func Critf(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_CRIT, format, args...)
|
||||
}
|
||||
|
||||
func Alertf(format string, args ...interface{}) {
|
||||
func Alertf(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_ALERT, format, args...)
|
||||
}
|
||||
|
||||
func Emergf(format string, args ...interface{}) {
|
||||
func Emergf(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_EMERG, format, args...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func Fatal(args ...interface{}) {
|
||||
func Fatal(args ...any) {
|
||||
log.println(gsyslog.LOG_ERR, args...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
func Fatalf(format string, args ...any) {
|
||||
log.printf(gsyslog.LOG_ERR, format, args...)
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -279,7 +279,7 @@ func FatalError(err error, message string) {
|
||||
}
|
||||
|
||||
// Spew will pretty print the args if the logger is set to DEBUG priority.
|
||||
func Spew(args ...interface{}) {
|
||||
func Spew(args ...any) {
|
||||
log.spew(args...)
|
||||
}
|
||||
|
||||
|
||||
@@ -2,14 +2,13 @@
|
||||
// consist of timestamps, an actor and event string, and a mapping of
|
||||
// string key-value attribute pairs. For example,
|
||||
//
|
||||
// log.Error("serialiser", "failed to open file",
|
||||
// map[string]string{
|
||||
// "error": err.Error(),
|
||||
// "path": "data.bin",
|
||||
// })
|
||||
// log.Error("serialiser", "failed to open file",
|
||||
// map[string]string{
|
||||
// "error": err.Error(),
|
||||
// "path": "data.bin",
|
||||
// })
|
||||
//
|
||||
// This produces the output message
|
||||
//
|
||||
// [2016-04-01T15:04:30-0700] [ERROR] [actor:serialiser event:failed to open file] error=is a directory path=data.bin
|
||||
//
|
||||
// [2016-04-01T15:04:30-0700] [ERROR] [actor:serialiser event:failed to open file] error=is a directory path=data.bin
|
||||
package logging
|
||||
|
||||
@@ -25,8 +25,8 @@ func main() {
|
||||
|
||||
log.Info("example", "filelog test", nil)
|
||||
exampleNewFromFile()
|
||||
os.Remove("example.log")
|
||||
os.Remove("example.err")
|
||||
_ = os.Remove("example.log")
|
||||
_ = os.Remove("example.err")
|
||||
}
|
||||
|
||||
func exampleNewFromFile() {
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
package logging
|
||||
|
||||
import "os"
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// File writes its logs to file.
|
||||
type File struct {
|
||||
@@ -8,22 +12,6 @@ type File struct {
|
||||
*LogWriter
|
||||
}
|
||||
|
||||
// Close calls close on the underlying log files.
|
||||
func (fl *File) Close() error {
|
||||
if fl.fo != nil {
|
||||
if err := fl.fo.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
fl.fo = nil
|
||||
}
|
||||
|
||||
if fl.fe != nil {
|
||||
return fl.fe.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFile creates a new Logger that writes all logs to the file
|
||||
// specified by path. If overwrite is specified, the log file will be
|
||||
// truncated before writing. Otherwise, the log file will be appended
|
||||
@@ -36,7 +24,7 @@ func NewFile(path string, overwrite bool) (*File, error) {
|
||||
if overwrite {
|
||||
fl.fo, err = os.Create(path)
|
||||
} else {
|
||||
fl.fo, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0644)
|
||||
fl.fo, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0600) // #nosec G302
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -59,7 +47,7 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
|
||||
if overwrite {
|
||||
fl.fo, err = os.Create(outpath)
|
||||
} else {
|
||||
fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
||||
fl.fo, err = os.OpenFile(outpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -69,14 +57,51 @@ func NewSplitFile(outpath, errpath string, overwrite bool) (*File, error) {
|
||||
if overwrite {
|
||||
fl.fe, err = os.Create(errpath)
|
||||
} else {
|
||||
fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
||||
fl.fe, err = os.OpenFile(errpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fl.Close()
|
||||
return nil, err
|
||||
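// If closing the output log also fails, join both errors so callers can
// still match either failure with errors.Is or errors.As.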
if closeErr := fl.Close(); closeErr != nil {
|
||||
return nil, fmt.Errorf("failed to open error log: %w", errors.Join(closeErr, err))
|
||||
}
|
||||
return nil, fmt.Errorf("failed to open error log: %w", err)
|
||||
}
|
||||
|
||||
fl.LogWriter = NewLogWriter(fl.fo, fl.fe)
|
||||
return fl, nil
|
||||
}
|
||||
|
||||
// Close calls close on the underlying log files.
|
||||
func (fl *File) Close() error {
|
||||
if fl.fo != nil {
|
||||
if err := fl.fo.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
fl.fo = nil
|
||||
}
|
||||
|
||||
if fl.fe != nil {
|
||||
return fl.fe.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fl *File) Flush() error {
|
||||
if err := fl.fo.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fl.fe.Sync()
|
||||
}
|
||||
|
||||
func (fl *File) Chmod(mode os.FileMode) error {
|
||||
if err := fl.fo.Chmod(mode); err != nil {
|
||||
return fmt.Errorf("failed to chmod output log: %w", err)
|
||||
}
|
||||
|
||||
if err := fl.fe.Chmod(mode); err != nil {
|
||||
return fmt.Errorf("failed to chmod error log: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -32,31 +32,6 @@ const (
|
||||
// DefaultLevel is the default logging level when none is provided.
|
||||
const DefaultLevel = LevelInfo
|
||||
|
||||
// Cheap integer to fixed-width decimal ASCII. Give a negative width
|
||||
// to avoid zero-padding. (From log/log.go in the standard library).
|
||||
func itoa(i int, wid int) string {
|
||||
// Assemble decimal in reverse order.
|
||||
var b [20]byte
|
||||
bp := len(b) - 1
|
||||
for i >= 10 || wid > 1 {
|
||||
wid--
|
||||
q := i / 10
|
||||
b[bp] = byte('0' + i - q*10)
|
||||
bp--
|
||||
i = q
|
||||
}
|
||||
// i < 10
|
||||
b[bp] = byte('0' + i)
|
||||
return string(b[bp:])
|
||||
}
|
||||
|
||||
func writeToOut(level Level) bool {
|
||||
if level < LevelWarning {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var levelPrefix = [...]string{
|
||||
LevelDebug: "DEBUG",
|
||||
LevelInfo: "INFO",
|
||||
|
||||
105
logging/log.go
@@ -1,6 +1,7 @@
|
||||
package logging
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@@ -11,64 +12,64 @@ import (
|
||||
//
|
||||
// Log messages consist of four components:
|
||||
//
|
||||
// 1. The **level** attaches a notion of priority to the log message.
|
||||
// Several log levels are available:
|
||||
// 1. The **level** attaches a notion of priority to the log message.
|
||||
// Several log levels are available:
|
||||
//
|
||||
// + FATAL (32): the system is in an unsuable state, and cannot
|
||||
// continue to run. Most of the logging for this will cause the
|
||||
// program to exit with an error code.
|
||||
// + CRITICAL (16): critical conditions. The error, if uncorrected, is
|
||||
// likely to cause a fatal condition shortly. An example is running
|
||||
// out of disk space. This is something that the ops team should get
|
||||
// paged for.
|
||||
// + ERROR (8): error conditions. A single error doesn't require an
|
||||
// ops team to be paged, but repeated errors should often trigger a
|
||||
// page based on threshold triggers. An example is a network
|
||||
// failure: it might be a transient failure (these do happen), but
|
||||
// most of the time it's self-correcting.
|
||||
// + WARNING (4): warning conditions. An example of this is a bad
|
||||
// request sent to a server. This isn't an error on the part of the
|
||||
// program, but it may be indicative of other things. Like errors,
|
||||
// the ops team shouldn't be paged for errors, but a page might be
|
||||
// triggered if a certain threshold of warnings is reached (which is
|
||||
// typically much higher than errors). For example, repeated
|
||||
// warnings might be a sign that the system is under attack.
|
||||
// + INFO (2): informational message. This is a normal log message
|
||||
// that is used to deliver information, such as recording
|
||||
// requests. Ops teams are never paged for informational
|
||||
// messages. This is the default log level.
|
||||
// + DEBUG (1): debug-level message. These are only used during
|
||||
// development or if a deployed system repeatedly sees abnormal
|
||||
// errors.
|
||||
// + FATAL (32): the system is in an unusable state and cannot
|
||||
// continue to run. Most of the logging for this will cause the
|
||||
// program to exit with an error code.
|
||||
// + CRITICAL (16): critical conditions. The error, if uncorrected, is
|
||||
// likely to cause a fatal condition shortly. An example is running
|
||||
// out of disk space. This is something that the ops team should get
|
||||
// paged for.
|
||||
// + ERROR (8): error conditions. A single error doesn't require an
|
||||
// ops team to be paged, but repeated errors should often trigger a
|
||||
// page based on threshold triggers. An example is a network
|
||||
// failure: it might be a transient failure (these do happen), but
|
||||
// most of the time it's self-correcting.
|
||||
// + WARNING (4): warning conditions. An example of this is a bad
|
||||
// request sent to a server. This isn't an error on the part of the
|
||||
// program, but it may be indicative of other things. Like errors,
|
||||
// the ops team shouldn't be paged for errors, but a page might be
|
||||
// triggered if a certain threshold of warnings is reached (which is
|
||||
// typically much higher than errors). For example, repeated
|
||||
// warnings might be a sign that the system is under attack.
|
||||
// + INFO (2): informational message. This is a normal log message
|
||||
// used to deliver information, such as recording requests. Ops
|
||||
// teams are never paged for informational messages. This is the
|
||||
// default log level.
|
||||
// + DEBUG (1): debug-level message. These are only used during
|
||||
// development or if a deployed system repeatedly sees abnormal
|
||||
// errors.
|
||||
//
|
||||
// The numeric values indicate the priority of a given level.
|
||||
// The numeric values indicate the priority of a given level.
|
||||
//
|
||||
// 2. The **actor** is used to specify which component is generating
|
||||
// the log message. This could be the program name, or it could be
|
||||
// a specific component inside the system.
|
||||
// 2. The **actor** is used to specify which component is generating
|
||||
// the log message. This could be the program name, or it could be
|
||||
// a specific component inside the system.
|
||||
//
|
||||
// 3. The **event** is a short message indicating what happened. This is
|
||||
// most like the traditional log message.
|
||||
// 3. The **event** is a short message indicating what happened. This is
|
||||
// most like the traditional log message.
|
||||
//
|
||||
// 4. The **attributes** are an optional set of key-value string pairs that
|
||||
// provide additional information.
|
||||
// 4. The **attributes** are an optional set of key-value string pairs that
|
||||
// provide additional information.
|
||||
//
|
||||
// Additionally, each log message has an associated timestamp. For the
|
||||
// text-based logs, this is "%FT%T%z"; for the binary logs, this is a
|
||||
// 64-bit Unix timestamp. An example text-based timestamp might look like ::
|
||||
//
|
||||
// [2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5 request-size=839
|
||||
// [2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5 request-size=839
|
||||
//
|
||||
// Note that this is organised in a manner that facilitates parsing::
|
||||
//
|
||||
// /\[(\d{4}-\d{3}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+\)]\) \[actor:(.+?) event:(.+?)\]/
|
||||
// /\[(\d{4}-\d{3}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+\)]\) \[actor:(.+?) event:(.+?)\]/
|
||||
//
|
||||
// will cover the header:
|
||||
//
|
||||
// + ``$1`` contains the timestamp
|
||||
// + ``$2`` contains the level
|
||||
// + ``$3`` contains the actor
|
||||
// + ``$4`` contains the event
|
||||
// + “$1“ contains the timestamp
|
||||
// + “$2“ contains the level
|
||||
// + “$3“ contains the actor
|
||||
// + “$4“ contains the event.
|
||||
type Logger interface {
|
||||
// SetLevel sets the minimum log level.
|
||||
SetLevel(Level)
|
||||
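// Illustrative sketch only (not part of the logging package): parsing the
// text log header documented above. The regexp is an assumed, cleaned-up
// variant of the pattern in the package comment, which contains typos such
// as \d{3} in the month position.
package main

import (
	"fmt"
	"regexp"
)

var logHeader = regexp.MustCompile(
	`^\[(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{4})\] \[(\w+)\] \[actor:(.+?) event:(.+?)\]`)

func main() {
	line := "[2016-03-27T20:59:27-0700] [INFO] [actor:server event:request received] client=192.168.2.5"
	if m := logHeader.FindStringSubmatch(line); m != nil {
		fmt.Printf("time=%s level=%s actor=%s event=%s\n", m[1], m[2], m[3], m[4])
	}
}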
@@ -131,7 +132,7 @@ func (lw *LogWriter) output(w io.Writer, lvl Level, actor, event string, attrs m
|
||||
}
|
||||
|
||||
// Debug emits a debug-level message. These are only used during
|
||||
// development or if a deployed system repeatedly sees abnormal
|
||||
// development, or if a deployed system repeatedly sees abnormal
|
||||
// errors.
|
||||
//
|
||||
// Actor specifies the component emitting the message; event indicates
|
||||
@@ -213,7 +214,7 @@ func (lw *LogWriter) Critical(actor, event string, attrs map[string]string) {
|
||||
lw.output(lw.we, LevelCritical, actor, event, attrs)
|
||||
}
|
||||
|
||||
// Fatal emits a message indicating that the system is in an unsuable
|
||||
// Fatal emits a message indicating that the system is in an unusable
|
||||
// state, and cannot continue to run. The program will exit with exit
|
||||
// code 1.
|
||||
//
|
||||
@@ -229,9 +230,9 @@ func (lw *LogWriter) Fatal(actor, event string, attrs map[string]string) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// FatalCode emits a message indicating that the system is in an unsuable
|
||||
// FatalCode emits a message indicating that the system is in an unusable
|
||||
// state, and cannot continue to run. The program will exit with the
|
||||
// exit code speicfied in the exitcode argument.
|
||||
// exit code specified in the exitcode argument.
|
||||
//
|
||||
// Actor specifies the component emitting the message; event indicates
|
||||
// the event that caused the log message to be emitted. attrs is a map
|
||||
@@ -245,7 +246,7 @@ func (lw *LogWriter) FatalCode(exitcode int, actor, event string, attrs map[stri
|
||||
os.Exit(exitcode)
|
||||
}
|
||||
|
||||
// FatalNoDie emits a message indicating that the system is in an unsuable
|
||||
// FatalNoDie emits a message indicating that the system is in an unusable
|
||||
// state, and cannot continue to run. The program will not exit; it is
|
||||
// assumed that the caller has some final clean up to perform.
|
||||
//
|
||||
@@ -314,11 +315,17 @@ func (m *Multi) Status() error {
|
||||
}
|
||||
|
||||
func (m *Multi) Close() error {
|
||||
var errs []error
|
||||
for _, l := range m.loggers {
|
||||
l.Close()
|
||||
if err := l.Close(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func (m *Multi) Debug(actor, event string, attrs map[string]string) {
|
||||
|
||||
@@ -1,30 +1,32 @@
|
||||
package logging
|
||||
package logging_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/logging"
|
||||
)
|
||||
|
||||
// A list of implementations that should be tested.
|
||||
var implementations []Logger
|
||||
var implementations []logging.Logger
|
||||
|
||||
func init() {
|
||||
lw := NewLogWriter(&bytes.Buffer{}, nil)
|
||||
cw := NewConsole()
|
||||
lw := logging.NewLogWriter(&bytes.Buffer{}, nil)
|
||||
cw := logging.NewConsole()
|
||||
|
||||
implementations = append(implementations, lw)
|
||||
implementations = append(implementations, cw)
|
||||
}
|
||||
|
||||
func TestFileSetup(t *testing.T) {
|
||||
fw1, err := NewFile("fw1.log", true)
|
||||
fw1, err := logging.NewFile("fw1.log", true)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new file logger: %v", err)
|
||||
}
|
||||
|
||||
fw2, err := NewSplitFile("fw2.log", "fw2.err", true)
|
||||
fw2, err := logging.NewSplitFile("fw2.log", "fw2.err", true)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create new split file logger: %v", err)
|
||||
}
|
||||
@@ -33,7 +35,7 @@ func TestFileSetup(t *testing.T) {
|
||||
implementations = append(implementations, fw2)
|
||||
}
|
||||
|
||||
func TestImplementations(t *testing.T) {
|
||||
func TestImplementations(_ *testing.T) {
|
||||
for _, l := range implementations {
|
||||
l.Info("TestImplementations", "Info message",
|
||||
map[string]string{"type": fmt.Sprintf("%T", l)})
|
||||
@@ -44,20 +46,30 @@ func TestImplementations(t *testing.T) {
|
||||
|
||||
func TestCloseLoggers(t *testing.T) {
|
||||
for _, l := range implementations {
|
||||
l.Close()
|
||||
if err := l.Close(); err != nil {
|
||||
t.Errorf("failed to close logger: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDestroyLogFiles(t *testing.T) {
|
||||
os.Remove("fw1.log")
|
||||
os.Remove("fw2.log")
|
||||
os.Remove("fw2.err")
|
||||
if err := os.Remove("fw1.log"); err != nil {
|
||||
t.Errorf("failed to remove fw1.log: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Remove("fw2.log"); err != nil {
|
||||
t.Errorf("failed to remove fw2.log: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Remove("fw2.err"); err != nil {
|
||||
t.Errorf("failed to remove fw2.err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMulti(t *testing.T) {
|
||||
c1 := NewConsole()
|
||||
c2 := NewConsole()
|
||||
m := NewMulti(c1, c2)
|
||||
c1 := logging.NewConsole()
|
||||
c2 := logging.NewConsole()
|
||||
m := logging.NewMulti(c1, c2)
|
||||
if !m.Good() {
|
||||
t.Fatal("failed to set up multi logger")
|
||||
}
|
||||
|
||||
@@ -8,15 +8,15 @@ type mwc struct {
|
||||
}
|
||||
|
||||
// Write implements the Writer interface.
|
||||
func (t *mwc) Write(p []byte) (n int, err error) {
|
||||
func (t *mwc) Write(p []byte) (int, error) {
|
||||
for _, w := range t.wcs {
|
||||
n, err = w.Write(p)
|
||||
n, err := w.Write(p)
|
||||
if err != nil {
|
||||
return
|
||||
return n, err
|
||||
}
|
||||
if n != len(p) {
|
||||
err = io.ErrShortWrite
|
||||
return
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return len(p), nil
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
package mwc
|
||||
package mwc_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/assert"
|
||||
"git.wntrmute.dev/kyle/goutils/mwc"
|
||||
"git.wntrmute.dev/kyle/goutils/testio"
|
||||
)
|
||||
|
||||
@@ -12,7 +13,7 @@ func TestMWC(t *testing.T) {
|
||||
buf1 := testio.NewBufCloser(nil)
|
||||
buf2 := testio.NewBufCloser(nil)
|
||||
|
||||
mwc := MultiWriteCloser(buf1, buf2)
|
||||
mwc := mwc.MultiWriteCloser(buf1, buf2)
|
||||
|
||||
_, err := mwc.Write([]byte("hello, world"))
|
||||
assert.NoErrorT(t, err)
|
||||
@@ -30,15 +31,15 @@ func TestMWCShort(t *testing.T) {
|
||||
buf3 := testio.NewBrokenWriter(5)
|
||||
buf4 := testio.NewSilentBrokenWriter(5)
|
||||
|
||||
mwc := MultiWriteCloser(buf1, buf2, buf3)
|
||||
defer mwc.Close()
|
||||
multiWriter := mwc.MultiWriteCloser(buf1, buf2, buf3)
|
||||
defer multiWriter.Close()
|
||||
|
||||
_, err := mwc.Write([]byte("hello, world"))
|
||||
_, err := multiWriter.Write([]byte("hello, world"))
|
||||
assert.ErrorT(t, err, "expected a short write error", "but no error occurred")
|
||||
mwc.Close()
|
||||
multiWriter.Close()
|
||||
|
||||
mwc = MultiWriteCloser(buf1, buf2, buf4)
|
||||
_, err = mwc.Write([]byte("hello, world"))
|
||||
multiWriter = mwc.MultiWriteCloser(buf1, buf2, buf4)
|
||||
_, err = multiWriter.Write([]byte("hello, world"))
|
||||
assert.ErrorT(t, err, "expected a short write error", "but no error occurred")
|
||||
}
|
||||
|
||||
@@ -47,7 +48,7 @@ func TestMWCClose(t *testing.T) {
|
||||
buf2 := testio.NewBufCloser(nil)
|
||||
buf3 := testio.NewBrokenCloser(nil)
|
||||
|
||||
mwc := MultiWriteCloser(buf1, buf2, buf3)
|
||||
mwc := mwc.MultiWriteCloser(buf1, buf2, buf3)
|
||||
_, err := mwc.Write([]byte("hello, world"))
|
||||
assert.NoErrorT(t, err)
|
||||
|
||||
|
||||
49
rand/rand.go
@@ -1,49 +0,0 @@
|
||||
// Package rand contains utilities for interacting with math/rand, including
|
||||
// seeding from a random sed.
|
||||
package rand
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
mrand "math/rand"
|
||||
)
|
||||
|
||||
// CryptoUint64 generates a cryptographically-secure 64-bit integer.
|
||||
func CryptoUint64() (uint64, error) {
|
||||
bs := make([]byte, 8)
|
||||
_, err := rand.Read(bs)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return binary.BigEndian.Uint64(bs), nil
|
||||
}
|
||||
|
||||
// Seed initialises the non-cryptographic PRNG with a random,
|
||||
// cryptographically secure value. This is done just as a good
|
||||
// way to make this random. The returned 64-bit value is the seed.
|
||||
func Seed() (uint64, error) {
|
||||
seed, err := CryptoUint64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// NB: this is permitted.
|
||||
mrand.Seed(int64(seed))
|
||||
return seed, nil
|
||||
}
|
||||
|
||||
// Int is a wrapper for math.Int so only one package needs to be imported.
|
||||
func Int() int {
|
||||
return mrand.Int()
|
||||
}
|
||||
|
||||
// Intn is a wrapper for math.Intn so only one package needs to be imported.
|
||||
func Intn(max int) int {
|
||||
return mrand.Intn(max)
|
||||
}
|
||||
|
||||
// Intn2 returns a random value between min and max, inclusive.
|
||||
func Intn2(min, max int) int {
|
||||
return Intn(max-min) + min
|
||||
}
|
||||
@@ -1,74 +0,0 @@
|
||||
package rand
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
mrand "math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCryptoUint64(t *testing.T) {
|
||||
n1, err := CryptoUint64()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
n2, err := CryptoUint64()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// This has such a low chance of occurring that it's likely to be
|
||||
// indicative of a bad CSPRNG.
|
||||
if n1 == n2 {
|
||||
t.Fatalf("repeated random uint64s: %d", n1)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntn(t *testing.T) {
|
||||
expected := []int{3081, 4887, 4847, 1059, 3081}
|
||||
mrand.Seed(1)
|
||||
for i := 0; i < 5; i++ {
|
||||
n := Intn2(1000, 5000)
|
||||
|
||||
if n != expected[i] {
|
||||
fmt.Printf("invalid sequence at %d: expected %d, have %d", i, expected[i], n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeed(t *testing.T) {
|
||||
seed1, err := Seed()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var seed2 uint64
|
||||
n1 := Int()
|
||||
tries := 0
|
||||
|
||||
for {
|
||||
seed2, err = Seed()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if seed1 != seed2 {
|
||||
break
|
||||
}
|
||||
|
||||
tries++
|
||||
|
||||
if tries > 3 {
|
||||
t.Fatal("can't generate two unique seeds")
|
||||
}
|
||||
}
|
||||
|
||||
n2 := Int()
|
||||
|
||||
// Again, this not impossible, merely statistically improbably and a
|
||||
// potential canary for RNG issues.
|
||||
if n1 == n2 {
|
||||
t.Fatalf("repeated integers fresh from two unique seeds: %d/%d -> %d",
|
||||
seed1, seed2, n1)
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.