Compare commits
104 Commits
| SHA1 |
|---|
| 7f3f513bdd |
| 786f116f54 |
| 89aaa969b8 |
| f5917ac6fc |
| 3e80e46c17 |
| 3c1d92db6b |
| 25a562865c |
| e30e3e9b75 |
| 57672c8f78 |
| 17e999754b |
| c4c9abe310 |
| b714c75a43 |
| 3f92963c74 |
| 51f6d7c74d |
| 67bf26c5da |
| 62c3db88ef |
| bb7749efd1 |
| a3a8115279 |
| 8ca8538268 |
| 155c49cc5e |
| dda9fd9f07 |
| c251c1e1b5 |
| 6eb533f79b |
| ea5ffa4828 |
| aa96e47112 |
| d34a417dce |
| d11e0cf9f9 |
| aad7d68599 |
| 4560868688 |
| 8d5406256f |
| 9280e846fa |
| 0a71661901 |
| 804f53d27d |
| cfb80355bb |
| 77160395a0 |
| 37d5e04421 |
| dc54eeacbc |
| e2a3081ce5 |
| 3149d958f4 |
| f296344acf |
| 3fb2d88a3f |
| 150c02b377 |
| 83f88c49fe |
| 7c437ac45f |
| c999bf35b0 |
| 4dc135cfe0 |
| 790113e189 |
| 8348c5fd65 |
| 1eafb638a8 |
| 3ad562b6fa |
| 0f77bd49dc |
| f31d74243f |
| a573f1cd20 |
| f93cf5fa9c |
| b879d62384 |
| c99ffd4394 |
| ed8c07c1c5 |
| cf2b016433 |
| 2899885c42 |
| f3b4838cf6 |
| 8ed30e9960 |
| c7de3919b0 |
| 840066004a |
| 9fb93a3802 |
| ecc7e5ab1e |
| a934c42aa1 |
| 948986ba60 |
| 3be86573aa |
| e3a6355edb |
| 66d16acebc |
| fdff2e0afe |
| 0dcd18c6f1 |
| 024d552293 |
| 9cd2ced695 |
| b92e16fa4d |
| 6fbdece4be |
| 619c08a13f |
| 944a57bf0e |
| 0857b29624 |
| e95404bfc5 |
| 924654e7c4 |
| 9e0979e07f |
| bbc82ff8de |
| 5fd928f69a |
| acefe4a3b9 |
| a1452cebc9 |
| 6e9812e6f5 |
| 8c34415c34 |
| 2cf2c15def |
| eaad1884d4 |
| 5d57d844d4 |
| 31b9d175dd |
| 79e106da2e |
| 939b1bc272 |
| 89e74f390b |
| 7881b6fdfc |
| 5bef33245f |
| 84250b0501 |
| 459e9f880f |
| 0982f47ce3 |
| 1dec15fd11 |
| 2ee9cae5ba |
| dc04475120 |
| dbbd5116b5 |
```diff
@@ -64,4 +64,4 @@ workflows:
   testbuild:
     jobs:
       - testbuild
-      # - lint
+      - lint
```
.github/workflows/release.yml (vendored, new file, 35 lines)

```yaml
name: Release

on:
  push:
    tags:
      - 'v*'
  workflow_dispatch: {}

permissions:
  contents: write

jobs:
  goreleaser:
    name: GoReleaser
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          cache: true

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.gitignore (vendored, 4 lines changed)

```diff
@@ -1 +1,5 @@
 .idea
+cmd/cert-bundler/testdata/pkg/*
+# Added by goreleaser init:
+dist/
+cmd/cert-bundler/testdata/bundle/
```
```diff
@@ -12,12 +12,31 @@
 version: "2"
 
+output:
+  sort-order:
+    - file
+    - linter
+    - severity
+
 issues:
   # Maximum count of issues with the same text.
   # Set to 0 to disable.
   # Default: 3
   max-same-issues: 50
 
+  # Exclude some lints for CLI programs under cmd/ (package main).
+  # The project allows fmt.Print* in command-line tools; keep forbidigo for libraries.
+  exclude-rules:
+    - path: ^cmd/
+      linters:
+        - forbidigo
+    - path: cmd/.*
+      linters:
+        - forbidigo
+    - path: .*/cmd/.*
+      linters:
+        - forbidigo
+
 formatters:
   enable:
     - goimports # checks if the code and import statements are formatted according to the 'goimports' command
@@ -73,7 +92,6 @@ linters:
     - godoclint # checks Golang's documentation practice
     - godot # checks if comments end in a period
     - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod
-    - goprintffuncname # checks that printf-like functions are named with f at the end
     - gosec # inspects source code for security problems
     - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
     - iface # checks the incorrect use of interfaces, helping developers avoid interface pollution
@@ -228,6 +246,13 @@ linters:
       # Such cases aren't reported by default.
       # Default: false
       check-type-assertions: true
+      exclude-functions:
+        - (*git.wntrmute.dev/kyle/goutils/dbg.DebugPrinter).Write
+        - git.wntrmute.dev/kyle/goutils/lib.Warn
+        - git.wntrmute.dev/kyle/goutils/lib.Warnx
+        - git.wntrmute.dev/kyle/goutils/lib.Err
+        - git.wntrmute.dev/kyle/goutils/lib.Errx
+        - (*git.wntrmute.dev/kyle/goutils/sbuf.Buffer).Write
 
     exhaustive:
       # Program elements to check for exhaustiveness.
@@ -319,6 +344,12 @@ linters:
       # https://github.com/godoc-lint/godoc-lint?tab=readme-ov-file#no-unused-link
       - no-unused-link
 
+    gosec:
+      excludes:
+        - G104 # handled by errcheck
+        - G301
+        - G306
+
     govet:
       # Enable all analyzers.
       # Default: false
@@ -341,11 +372,6 @@ linters:
       skip-single-param: true
 
     mnd:
-      # List of function patterns to exclude from analysis.
-      # Values always ignored: `time.Date`,
-      # `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`,
-      # `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`.
-      # Default: []
       ignored-functions:
         - args.Error
         - flag.Arg
@@ -359,6 +385,15 @@ linters:
         - os.WriteFile
         - prometheus.ExponentialBuckets.*
        - prometheus.LinearBuckets
+      ignored-numbers:
+        - 1
+        - 2
+        - 3
+        - 4
+        - 8
+        - 24
+        - 30
+        - 365
 
     nakedret:
       # Make an issue if func has more lines of code than this setting, and it has naked returns.
@@ -427,6 +462,10 @@ linters:
        # Omit embedded fields from selector expression.
        # https://staticcheck.dev/docs/checks/#QF1008
        - -QF1008
+        # We often explicitly enable old/deprecated ciphers for research.
+        - -SA1019
+        # Covered by revive.
+        - -ST1003
 
     usetesting:
       # Enable/disable `os.TempDir()` detections.
@@ -445,10 +484,20 @@ linters:
     rules:
       - path: 'ahash/ahash.go'
         linters: [ staticcheck, gosec ]
+      - path: 'twofactor/.*.go'
+        linters: [ exhaustive, mnd, revive ]
       - path: 'backoff/backoff_test.go'
         linters: [ testpackage ]
       - path: 'dbg/dbg_test.go'
         linters: [ testpackage ]
+      - path: 'log/logger.go'
+        linters: [ forbidigo ]
+      - path: 'logging/example_test.go'
+        linters: [ testableexamples ]
+      - path: 'main.go'
+        linters: [ forbidigo, mnd, reassign ]
+      - path: 'cmd/cruntar/main.go'
+        linters: [ unparam ]
       - source: 'TODO'
         linters: [ godot ]
       - text: 'should have a package comment'
@@ -470,4 +519,5 @@ linters:
         - goconst
         - gosec
         - noctx
+        - reassign
         - wrapcheck
```
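The new `exclude-rules` entries switch forbidigo off for anything under `cmd/`, so a command-line `main` package can print directly to stdout while library packages stay checked. A minimal sketch of the kind of code this is meant to allow (a hypothetical CLI, not a file from the repository):

```go
// Package main is a hypothetical tool under cmd/; with the exclude-rules
// above, forbidigo no longer flags the fmt.Print* calls here, while
// packages outside cmd/ are still linted for them.
package main

import (
	"fmt"
	"os"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: example <name>")
		os.Exit(1)
	}
	fmt.Printf("hello, %s\n", os.Args[1])
}
```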
.goreleaser.yaml (new file, 445 lines)

```yaml
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com

# The lines below are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/need to use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj

version: 2

before:
  hooks:
    # You may remove this if you don't use go modules.
    - go mod tidy
    # you may remove this if you don't need go generate
    - go generate ./...

builds:
  - id: atping
    main: ./cmd/atping/main.go
    binary: atping
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: ca-signed
    main: ./cmd/ca-signed/main.go
    binary: ca-signed
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: cert-bundler
    main: ./cmd/cert-bundler/main.go
    binary: cert-bundler
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: cert-revcheck
    main: ./cmd/cert-revcheck/main.go
    binary: cert-revcheck
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certchain
    main: ./cmd/certchain/main.go
    binary: certchain
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certdump
    main: ./cmd/certdump/main.go
    binary: certdump
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certexpiry
    main: ./cmd/certexpiry/main.go
    binary: certexpiry
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certser
    main: ./cmd/certser/main.go
    binary: certser
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: certverify
    main: ./cmd/certverify/main.go
    binary: certverify
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: clustersh
    main: ./cmd/clustersh/main.go
    binary: clustersh
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: cruntar
    main: ./cmd/cruntar/main.go
    binary: cruntar
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: csrpubdump
    main: ./cmd/csrpubdump/main.go
    binary: csrpubdump
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: data_sync
    main: ./cmd/data_sync/main.go
    binary: data_sync
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: diskimg
    main: ./cmd/diskimg/main.go
    binary: diskimg
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: dumpbytes
    main: ./cmd/dumpbytes/main.go
    binary: dumpbytes
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: eig
    main: ./cmd/eig/main.go
    binary: eig
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: fragment
    main: ./cmd/fragment/main.go
    binary: fragment
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: host
    main: ./cmd/host/main.go
    binary: host
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: jlp
    main: ./cmd/jlp/main.go
    binary: jlp
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: kgz
    main: ./cmd/kgz/main.go
    binary: kgz
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: minmax
    main: ./cmd/minmax/main.go
    binary: minmax
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: parts
    main: ./cmd/parts/main.go
    binary: parts
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: pem2bin
    main: ./cmd/pem2bin/main.go
    binary: pem2bin
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: pembody
    main: ./cmd/pembody/main.go
    binary: pembody
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: pemit
    main: ./cmd/pemit/main.go
    binary: pemit
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: readchain
    main: ./cmd/readchain/main.go
    binary: readchain
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: renfnv
    main: ./cmd/renfnv/main.go
    binary: renfnv
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: rhash
    main: ./cmd/rhash/main.go
    binary: rhash
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: rolldie
    main: ./cmd/rolldie/main.go
    binary: rolldie
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: showimp
    main: ./cmd/showimp/main.go
    binary: showimp
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: ski
    main: ./cmd/ski/main.go
    binary: ski
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: sprox
    main: ./cmd/sprox/main.go
    binary: sprox
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: stealchain
    main: ./cmd/stealchain/main.go
    binary: stealchain
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: stealchain-server
    main: ./cmd/stealchain-server/main.go
    binary: stealchain-server
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: subjhash
    main: ./cmd/subjhash/main.go
    binary: subjhash
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: tlsinfo
    main: ./cmd/tlsinfo/main.go
    binary: tlsinfo
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: tlskeypair
    main: ./cmd/tlskeypair/main.go
    binary: tlskeypair
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: utc
    main: ./cmd/utc/main.go
    binary: utc
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: yamll
    main: ./cmd/yamll/main.go
    binary: yamll
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
  - id: zsearch
    main: ./cmd/zsearch/main.go
    binary: zsearch
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64

archives:
  - formats: [tar.gz]
    # archive filename: name_version_os_arch
    name_template: >-
      {{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}
    # use zip for windows archives
    format_overrides:
      - goos: windows
        formats: [zip]

changelog:
  sort: asc
  filters:
    exclude:
      - "^docs:"
      - "^test:"

release:
  github:
    owner: kisom
    name: goutils
  footer: >-

    ---

    Released by [GoReleaser](https://github.com/goreleaser/goreleaser).
```
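Every entry in `builds` is the same stanza with a different id, main path, and binary name. A small generator along these lines could emit that section from the directories under `cmd/`; this is a sketch of the idea, not a tool that exists in the repository:

```go
// Hypothetical helper that prints a GoReleaser build stanza for each
// directory under cmd/, matching the repeated pattern in .goreleaser.yaml.
package main

import (
	"fmt"
	"os"
)

const stanza = `  - id: %[1]s
    main: ./cmd/%[1]s/main.go
    binary: %[1]s
    env:
      - CGO_ENABLED=0
    goos: [linux, darwin]
    goarch: [amd64, arm64]
    ignore:
      - goos: darwin
        goarch: amd64
`

func main() {
	entries, err := os.ReadDir("cmd")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Println("builds:")
	for _, e := range entries {
		if e.IsDir() {
			fmt.Printf(stanza, e.Name())
		}
	}
}
```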
CHANGELOG (237 lines changed)

```diff
@@ -1,27 +1,228 @@
-Release 1.2.1 - 2018-09-15
-
-+ Add missing format argument to Errorf call in kgz.
-
-Release 1.2.0 - 2018-09-15
-
-+ Adds the kgz command line utility.
-
-Release 1.1.0 - 2017-11-16
-
-+ A number of new command line utilities were added
-
-+ atping
-+ cruntar
-+ renfnv
-+
-+ ski
-+ subjhash
-+ yamll
-
-+ new package: ahash
-+ package for loading hashes from an algorithm string
-
-+ new certificate loading functions in the lib package
-
-+ new package: tee
-+ emulates tee(1)
+CHANGELOG
+
+v1.14.6 - 2025-11-18
+
+Added:
+- certlib: move tlskeypair functions into certlib.
+
+v1.14.5 - 2025-11-18
+
+Changed:
+- certlib/verify: fix a nil-pointer dereference.
+
+v1.14.4 - 2025-11-18
+
+Added:
+- certlib/ski: add support for return certificate SKI.
+- certlib/verify: add support for verifying certificates.
+
+Changed:
+- certlib/dump: moved more functions into the dump package.
+- cmd: many certificate-related commands had their functionality moved into
+  certlib.
+
+v1.14.3 - 2025-11-18
+
+Added:
+- certlib/dump: the certificate dumping functions have been moved into
+  their own package.
+
+Changed:
+- cmd/certdump: refactor out most of the functionality into certlib/dump.
+- cmd/kgz: add extended metadata support.
+
+v1.14.2 - 2025-11-18
+
+Added:
+- lib: add tooling for generating baseline TLS configs.
+
+Changed:
+- cmd: update all commands to allow the use strict TLS configs. Note that
+  many of these tools are intended for debugging broken or insecure TLS
+  systems, and the ability to support insecure TLS configurations is
+  important in this regard.
+
+v1.14.1 - 2025-11-18
+
+Added:
+- build: add missing Dockerfile.
+
+v1.14.0 - 2025-11-18
+
+Added:
+- lib/dialer: introduce proxy-aware dialers and helpers:
+  - NewNetDialer and NewTLSDialer honoring SOCKS5_PROXY, HTTPS_PROXY, HTTP_PROXY
+    (case-insensitive) with precedence SOCKS5 > HTTPS > HTTP.
+  - DialTCP and DialTLS convenience functions; DialTLS performs a TLS handshake
+    and returns a concrete *tls.Conn.
+  - NewHTTPClient: returns a proxy-aware *http.Client. Uses SOCKS5 proxy when
+    configured (disables HTTP(S) proxying to avoid double-proxying); otherwise
+    relies on http.ProxyFromEnvironment (respects HTTP(S)_PROXY and NO_PROXY).
+- build: the releasse-docker.sh builds and pushes the correct Docker images.
+
+Changed:
+- cmd: migrate tools to new proxy-aware helpers where appropriate:
+  - certchain, stealchain, tlsinfo: use lib.DialTLS.
+  - cert-revcheck: use lib.DialTLS for site connects and a proxy-aware
+    HTTP client for OCSP/CRL fetches.
+  - rhash: use proxy-aware HTTP client for downloads.
+- lib/fetch: migrate from certlib/fetch.go to lib/fetch.go and use DialTLS
+  under the hood.
+- go.mod: add golang.org/x/net dependency (for SOCKS5 support) and align x/crypto.
+
+Notes:
+- HTTP(S) proxy CONNECT supports optional basic auth via proxy URL credentials.
+- HTTPS proxies are TLS-wrapped prior to CONNECT.
+- Timeouts apply to TCP connects, proxy handshakes, and TLS handshakes; context
+  cancellation is honored.
+- Some commands retain bespoke dialing (e.g., IPv6-only or unix sockets) and
+  were intentionally left unchanged.
+
+v1.13.6 - 2025-11-18
+
+Changed:
+- build: removing gitea stuff.
+
+v1.13.5 - 2025-11-18
+
+Changed:
+- build: updating goreleaser config.
+
+v1.13.4 - 2025-11-18
+
+Changed:
+- build: updating goreleaser config.
+
+v1.13.3 - 2025-11-18
+
+Added:
+- certlib: introduce `Fetcher` for retrieving certificates.
+- lib: `HexEncode` gains a byte-slice output variant.
+- build: add GoReleaser configuration.
+
+Changed:
+- cmd: migrate programs to use `certlib.Fetcher` for certificate retrieval
+  (includes `certdump`, `ski`, and others).
+- cmd/ski: update display mode.
+
+Misc:
+- repository fixups and small cleanups.
+
+v1.13.2 - 2025-11-17
+
+Add:
+- certlib/bundler: refactor certificate bundling from cmd/cert-bundler
+  into a separate package.
+
+Changed:
+- cmd/cert-bundler: refactor to use bundler package, and update Dockerfile.
+
+v1.13.1 - 2025-11-17
+
+Add:
+- Dockerfile for cert-bundler.
+
+v1.13.0 - 2025-11-16
+
+Add:
+- cmd/certser: print serial numbers for certificates.
+- lib/HexEncode: add a new hex encode function handling multiple output
+  formats, including with and without colons.
+
+v1.12.4 - 2025-11-16
+
+Changed:
+
+- Linting fixes for twofactor that were previously masked.
+
+v1.12.3 erroneously tagged and pushed
+
+v1.12.2 - 2025-11-16
+
+Changed:
+
+- add rsc.io/qr dependency for twofactor.
+
+v1.12.1 - 2025-11-16
+
+Changed:
+- twofactor: Remove go.{mod,sum}.
+
+v1.12.0 - 2025-11-16
+
+Added
+- twofactor: the github.com/kisom/twofactor repo has been subtree'd
+  into this repo.
+
+v1.11.2 - 2025-11-16
+
+Changed
+- cmd/ski, cmd/csrpubdump, cmd/tlskeypair: centralize
+  certificate/private-key/CSR parsing by reusing certlib helpers.
+  This reduces duplication and improves consistency across commands.
+- csr: CSR parsing in the above commands now uses certlib.ParseCSR,
+  which verifies CSR signatures (behavioral hardening compared to
+  prior parsing without signature verification).
+
+v1.11.1 - 2025-11-16
+
+Changed
+- cmd: complete linting fixes across programs; no functional changes.
+
+v1.11.0 - 2025-11-15
+
+Added
+- cache/mru: introduce MRU cache implementation with timestamp utilities.
+
+Changed
+- certlib: complete overhaul to simplify APIs and internals.
+- repo: widespread linting cleanups across many packages (config, dbg, die,
+  fileutil, log/logging, mwc, sbuf, seekbuf, tee, testio, etc.).
+- cmd: general program cleanups; `cert-bundler` lint fixes.
+
+Removed
+- rand: remove unused package.
+- testutil: remove unused code.
+
+
+v1.10.1 — 2025-11-15
+
+Changed
+- certlib: major overhaul and refactor.
+- repo: linter autofixes ahead of release.
+
+
+v1.10.0 — 2025-11-14
+
+Added
+- cmd: add `cert-revcheck` command.
+
+Changed
+- ci/lint: add golangci-lint stage and initial cleanup.
+
+
+v1.9.1 — 2025-11-15
+
+Fixed
+- die: correct calls to `die.With`.
+
+
+v1.9.0 — 2025-11-14
+
+Added
+- cmd: add `cert-bundler` tool.
+
+Changed
+- misc: minor updates and maintenance.
+
+
+v1.8.1 — 2025-11-14
+
+Added
+- cmd: add `tlsinfo` tool.
+
+
+v1.8.0 — 2025-11-14
+
+Baseline
+- Initial baseline for this changelog series.
```
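The v1.14.0 notes say `NewHTTPClient` falls back to `http.ProxyFromEnvironment` when no SOCKS5 proxy is configured. A stdlib-only sketch of that fallback behaviour, as an illustration of the mechanism rather than the library's actual implementation:

```go
// Build an *http.Client whose transport resolves HTTP(S)_PROXY and
// NO_PROXY from the environment, which is the non-SOCKS5 path the
// changelog describes for lib.NewHTTPClient.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
		},
	}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```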
Dockerfile (new file, 38 lines)

```dockerfile
# syntax=docker/dockerfile:1

# ----- Builder stage: build all cmd/... tools -----
FROM golang:1.24-alpine AS builder

# Install necessary build dependencies for fetching modules
RUN apk add --no-cache git ca-certificates && update-ca-certificates

WORKDIR /src

# Cache modules
COPY go.mod go.sum ./
RUN --mount=type=cache,target=/go/pkg/mod \
    go mod download

# Copy the rest of the source
COPY . .

# Build and install all commands under ./cmd/... into /out
ENV CGO_ENABLED=0
ENV GOBIN=/out
RUN --mount=type=cache,target=/go/pkg/mod \
    go install ./cmd/...

# ----- Final runtime image: minimal alpine with tools installed -----
FROM alpine:3.20

# Ensure common utilities are present
RUN apk add --no-cache bash curl ca-certificates && update-ca-certificates

# Copy binaries from builder
COPY --from=builder /out/ /usr/local/bin/

# Working directory for mounting the host CWD
WORKDIR /work

# Default command shows available tools if run without args
CMD ["/bin/sh", "-lc", "echo 'Tools installed:' && ls -1 /usr/local/bin && echo '\nMount your project with: docker run --rm -it -v $PWD:/work IMAGE <tool> ...'"]
```
LICENSE (197 lines changed)

```diff
@@ -1,19 +1,194 @@
-Copyright (c) 2015-2023 Kyle Isom <kyle@tyrfingr.is>
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+Copyright 2025 K. Isom <kyle@imap.cc>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
 =======================================================================
 
 The backoff package (written during my time at Cloudflare) is released
 under the following license:
```
README.md (85 lines changed)

````diff
@@ -2,39 +2,52 @@ GOUTILS
 
 This is a collection of small utility code I've written in Go; the `cmd/`
 directory has a number of command-line utilities. Rather than keep all
-of these in superfluous repositories of their own, or rewriting them
+of these in superfluous repositories of their own or rewriting them
 for each project, I'm putting them here.
 
-The project can be built with the standard Go tooling, or it can be built
-with Bazel.
+The project can be built with the standard Go tooling.
 
 Contents:
 
 ahash/          Provides hashes from string algorithm specifiers.
 assert/         Error handling, assertion-style.
 backoff/        Implementation of an intelligent backoff strategy.
+cache/          Implementations of various caches.
+    lru/        Least-recently-used cache.
+    mru/        Most-recently-used cache.
+certlib/        Library for working with TLS certificates.
 cmd/
     atping/     Automated TCP ping, meant for putting in cronjobs.
-    certchain/  Display the certificate chain from a
-                TLS connection.
+    ca-signed/  Validate whether a certificate is signed by a CA.
+    cert-bundler/
+                Create certificate bundles from a source of PEM
+                certificates.
+    cert-revcheck/
+                Check whether a certificate has been revoked or is
+                expired.
+    certchain/  Display the certificate chain from a TLS connection.
     certdump/   Dump certificate information.
     certexpiry/ Print a list of certificate subjects and expiry times
                 or warn about certificates expiring within a certain
                 window.
-    certverify/ Verify a TLS X.509 certificate, optionally printing
+    certverify/ Verify a TLS X.509 certificate file, optionally printing
                 the time to expiry and checking for revocations.
     clustersh/  Run commands or transfer files across multiple
                 servers via SSH.
-    cruntar/    Untar an archive with hard links, copying instead of
+    cruntar/    (Un)tar an archive with hard links, copying instead of
                 linking.
     csrpubdump/ Dump the public key from an X.509 certificate request.
     data_sync/  Sync the user's homedir to external storage.
     diskimg/    Write a disk image to a device.
+    dumpbytes/  Dump the contents of a file as hex bytes, printing it as
+                a Go []byte literal.
     eig/        EEPROM image generator.
     fragment/   Print a fragment of a file.
+    host/       Go imlpementation of the host(1) command.
     jlp/        JSON linter/prettifier.
     kgz/        Custom gzip compressor / decompressor that handles 99%
                 of my use cases.
+    minmax/     Generate a minmax code for use in uLisp.
     parts/      Simple parts database management for my collection of
                 electronic components.
     pem2bin/    Dump the binary body of a PEM-encoded block.
@@ -44,37 +57,79 @@ Contents:
                 in a bundle.
     renfnv/     Rename a file to base32-encoded 64-bit FNV-1a hash.
     rhash/      Compute the digest of remote files.
+    rolldie/    Roll some dice.
     showimp/    List the external (e.g. non-stdlib and outside the
                 current working directory) imports for a Go file.
     ski         Display the SKI for PEM-encoded TLS material.
     sprox/      Simple TCP proxy.
-    stealchain/ Dump the verified chain from a TLS
-                connection to a server.
-    stealchain- Dump the verified chain from a TLS
-    server/     connection from a client.
+    stealchain/ Dump the verified chain from a TLS connection to a
+                server.
+    stealchain-server/
+                Dump the verified chain from a TLS connection from
+                from a client.
     subjhash/   Print or match subject info from a certificate.
+    tlsinfo/    Print information about a TLS connection (the TLS version
+                and cipher suite).
     tlskeypair/ Check whether a TLS certificate and key file match.
     utc/        Convert times to UTC.
     yamll/      A small YAML linter.
+    zsearch/    Search for a string in directory of gzipped files.
 config/         A simple global configuration system where configuration
                 data is pulled from a file or an environment variable
                 transparently.
+iniconf/        A simple INI-style configuration system.
 dbg/            A debug printer.
 die/            Death of a program.
 fileutil/       Common file functions.
 lib/            Commonly-useful functions for writing Go programs.
+log/            A syslog library.
 logging/        A logging library.
 mwc/            MultiwriteCloser implementation.
-rand/           Utilities for working with math/rand.
 sbuf/           A byte buffer that can be wiped.
 seekbuf/        A read-seekable byte buffer.
 syslog/         Syslog-type logging.
 tee/            Emulate tee(1)'s functionality in io.Writers.
 testio/         Various I/O utilities useful during testing.
-testutil/       Various utility functions useful during testing.
+twofactor/      Two-factor authentication.
 
 
 Each program should have a small README in the directory with more
 information.
 
-All code here is licensed under the ISC license.
+All code here is licensed under the Apache 2.0 license.
+
+Error handling
+--------------
+
+This repo standardizes on Go 1.13+ error wrapping and matching. Libraries and
+CLIs should:
+
+- Wrap causes with context using `fmt.Errorf("context: %w", err)`.
+- Use typed, structured errors from `certlib/certerr` for certificate-related
+  operations. These include a typed `*certerr.Error` with `Source` and `Kind`.
+- Match errors programmatically:
+  - `errors.Is(err, certerr.ErrEncryptedPrivateKey)` to detect sentinel states.
+  - `errors.As(err, &e)` (where `var e *certerr.Error`) to inspect
+    `e.Source`/`e.Kind`.
+
+Examples:
+
+```
+cert, err := certlib.LoadCertificate(path)
+if err != nil {
+    // sentinel match:
+    if errors.Is(err, certerr.ErrEmptyCertificate) {
+        // handle empty input
+    }
+
+    // typed error match
+    var ce *certerr.Error
+    if errors.As(err, &ce) {
+        switch ce.Kind {
+        case certerr.KindParse:
+            // parse error handling
+        case certerr.KindLoad:
+            // file loading error handling
+        }
+    }
+}
+```
````
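The README's matching example has a natural counterpart on the producing side. A minimal, stdlib-only sketch of the `%w` wrapping convention it describes (generic, not tied to certlib):

```go
// Wrap a cause with context using %w so callers can still match the
// underlying error with errors.Is, as the README's guidelines require.
package main

import (
	"errors"
	"fmt"
	"os"
)

func loadConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("loading config %q: %w", path, err)
	}
	return nil
}

func main() {
	err := loadConfig("/nonexistent/config.yml")
	if errors.Is(err, os.ErrNotExist) {
		fmt.Println("config file is missing:", err)
	}
}
```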
```diff
@@ -91,7 +91,7 @@ func TestReset(t *testing.T) {
 	}
 }
 
-const decay = 5 * time.Millisecond
+const decay = time.Second
 const maxDuration = 10 * time.Millisecond
 const interval = time.Millisecond
 
```
179
cache/lru/lru.go
vendored
Normal file
179
cache/lru/lru.go
vendored
Normal file
@@ -0,0 +1,179 @@
// Package lru implements a Least Recently Used cache.
package lru

import (
	"errors"
	"fmt"
	"sort"
	"sync"

	"github.com/benbjohnson/clock"
)

type item[V any] struct {
	V      V
	access int64
}

// A Cache is a map that retains a limited number of items. It must be
// initialized with New, providing a maximum capacity for the cache.
// Only the least recently used items are retained.
type Cache[K comparable, V any] struct {
	store  map[K]*item[V]
	access *timestamps[K]
	cap    int
	clock  clock.Clock
	// All public methods that have the possibility of modifying the
	// cache should lock it.
	mtx *sync.Mutex
}

// New must be used to create a new Cache.
func New[K comparable, V any](icap int) *Cache[K, V] {
	return &Cache[K, V]{
		store:  map[K]*item[V]{},
		access: newTimestamps[K](icap),
		cap:    icap,
		clock:  clock.New(),
		mtx:    &sync.Mutex{},
	}
}

// StringKeyCache is a convenience wrapper for cache keyed by string.
type StringKeyCache[V any] struct {
	*Cache[string, V]
}

// NewStringKeyCache creates a new LRU cache keyed by string.
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
	return &StringKeyCache[V]{Cache: New[string, V](icap)}
}

func (c *Cache[K, V]) lock() {
	c.mtx.Lock()
}

func (c *Cache[K, V]) unlock() {
	c.mtx.Unlock()
}

// Len returns the number of items currently in the cache.
func (c *Cache[K, V]) Len() int {
	return len(c.store)
}

// evict should remove the least-recently-used cache item.
func (c *Cache[K, V]) evict() {
	if c.access.Len() == 0 {
		return
	}

	k := c.access.K(0)
	c.evictKey(k)
}

// evictKey should remove the entry given by the key item.
func (c *Cache[K, V]) evictKey(k K) {
	delete(c.store, k)
	i, ok := c.access.Find(k)
	if !ok {
		return
	}

	c.access.Delete(i)
}

func (c *Cache[K, V]) sanityCheck() {
	if len(c.store) != c.access.Len() {
		panic(fmt.Sprintf("LRU cache is out of sync; store len = %d, access len = %d",
			len(c.store), c.access.Len()))
	}
}

// ConsistencyCheck runs a series of checks to ensure that the cache's
// data structures are consistent. It is not normally required, and it
// is primarily used in testing.
func (c *Cache[K, V]) ConsistencyCheck() error {
	c.lock()
	defer c.unlock()
	if err := c.access.ConsistencyCheck(); err != nil {
		return err
	}

	if len(c.store) != c.access.Len() {
		return fmt.Errorf("lru: cache is out of sync; store len = %d, access len = %d",
			len(c.store), c.access.Len())
	}

	for i := range c.access.ts {
		itm, ok := c.store[c.access.K(i)]
		if !ok {
			return errors.New("lru: key in access is not in store")
		}

		if c.access.T(i) != itm.access {
			return fmt.Errorf("timestamps are out of sync (%d != %d)",
				itm.access, c.access.T(i))
		}
	}

	if !sort.IsSorted(c.access) {
		return errors.New("lru: timestamps aren't sorted")
	}

	return nil
}

// Store adds the value v to the cache under the k.
func (c *Cache[K, V]) Store(k K, v V) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	if len(c.store) == c.cap {
		c.evict()
	}

	if _, ok := c.store[k]; ok {
		c.evictKey(k)
	}

	itm := &item[V]{
		V:      v,
		access: c.clock.Now().UnixNano(),
	}

	c.store[k] = itm
	c.access.Update(k, itm.access)
}

// Get returns the value stored in the cache. If the item isn't present,
// it will return false.
func (c *Cache[K, V]) Get(k K) (V, bool) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	itm, ok := c.store[k]
	if !ok {
		var zero V
		return zero, false
	}

	c.store[k].access = c.clock.Now().UnixNano()
	c.access.Update(k, itm.access)
	return itm.V, true
}

// Has returns true if the cache has an entry for k. It will not update
// the timestamp on the item.
func (c *Cache[K, V]) Has(k K) bool {
	// Don't need to lock as we don't modify anything.

	c.sanityCheck()

	_, ok := c.store[k]
	return ok
}
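A minimal usage sketch of the cache API above (Store/Get/Has via the string-keyed wrapper); the import path assumes this repository's module path, git.wntrmute.dev/kyle/goutils.

```
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/cache/lru"
)

func main() {
	// Capacity of 2: storing a third key triggers an eviction.
	c := lru.NewStringKeyCache[int](2)
	c.Store("raven", 1)
	c.Store("owl", 2)

	if v, ok := c.Get("raven"); ok {
		fmt.Println("raven =", v) // Get also refreshes raven's timestamp.
	}
	fmt.Println("has owl:", c.Has("owl")) // Has does not refresh the timestamp.
}
```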
87 cache/lru/lru_internal_test.go vendored Normal file
@@ -0,0 +1,87 @@
package lru

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

// These tests mirror the MRU-style behavior present in this LRU package
// implementation (eviction removes the most-recently-used entry).
func TestBasicCacheEviction(t *testing.T) {
	mock := clock.NewMock()
	c := NewStringKeyCache[int](2)
	c.clock = mock

	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if c.Len() != 0 {
		t.Fatal("cache should have size 0")
	}

	c.evict()
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	c.Store("raven", 1)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 1 {
		t.Fatalf("store should have length=1, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	c.Store("owl", 2)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	c.Store("goat", 3)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	// Since this implementation evicts the most-recently-used item, inserting
	// "goat" when full evicts "owl" (the most recent at that time).
	mock.Add(time.Second)
	if _, ok := c.Get("owl"); ok {
		t.Fatal("store should not have an entry for owl (MRU-evicted)")
	}
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	mock.Add(time.Second)
	c.Store("elk", 4)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if !c.Has("elk") {
		t.Fatal("store should contain an entry for 'elk'")
	}

	// Before storing elk, keys were: raven (older), goat (newer). Evict MRU -> goat.
	if !c.Has("raven") {
		t.Fatal("store should contain an entry for 'raven'")
	}

	if c.Has("goat") {
		t.Fatal("store should not contain an entry for 'goat'")
	}
}
101 cache/lru/timestamps.go vendored Normal file
@@ -0,0 +1,101 @@
package lru

import (
	"errors"
	"fmt"
	"io"
	"sort"
)

// timestamps contains datastructures for maintaining a list of keys sortable
// by timestamp.

type timestamp[K comparable] struct {
	t int64
	k K
}

type timestamps[K comparable] struct {
	ts  []timestamp[K]
	cap int
}

func newTimestamps[K comparable](icap int) *timestamps[K] {
	return &timestamps[K]{
		ts:  make([]timestamp[K], 0, icap),
		cap: icap,
	}
}

func (ts *timestamps[K]) K(i int) K {
	return ts.ts[i].k
}

func (ts *timestamps[K]) T(i int) int64 {
	return ts.ts[i].t
}

func (ts *timestamps[K]) Len() int {
	return len(ts.ts)
}

func (ts *timestamps[K]) Less(i, j int) bool {
	return ts.ts[i].t > ts.ts[j].t
}

func (ts *timestamps[K]) Swap(i, j int) {
	ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
}

func (ts *timestamps[K]) Find(k K) (int, bool) {
	for i := range ts.ts {
		if ts.ts[i].k == k {
			return i, true
		}
	}
	return -1, false
}

func (ts *timestamps[K]) Update(k K, t int64) bool {
	i, ok := ts.Find(k)
	if !ok {
		ts.ts = append(ts.ts, timestamp[K]{t, k})
		sort.Sort(ts)
		return false
	}

	ts.ts[i].t = t
	sort.Sort(ts)
	return true
}

func (ts *timestamps[K]) ConsistencyCheck() error {
	if !sort.IsSorted(ts) {
		return errors.New("lru: timestamps are not sorted")
	}

	keys := map[K]bool{}
	for i := range ts.ts {
		if keys[ts.ts[i].k] {
			return fmt.Errorf("lru: duplicate key %v detected", ts.ts[i].k)
		}
		keys[ts.ts[i].k] = true
	}

	if len(keys) != len(ts.ts) {
		return fmt.Errorf("lru: timestamp contains %d duplicate keys",
			len(ts.ts)-len(keys))
	}

	return nil
}

func (ts *timestamps[K]) Delete(i int) {
	ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
}

func (ts *timestamps[K]) Dump(w io.Writer) {
	for i := range ts.ts {
		fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
	}
}
50 cache/lru/timestamps_internal_test.go vendored Normal file
@@ -0,0 +1,50 @@
package lru

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

// These tests validate timestamps ordering semantics for the LRU package.
// Note: The LRU timestamps are sorted with most-recent-first (descending by t).
func TestTimestamps(t *testing.T) {
	ts := newTimestamps[string](3)
	mock := clock.NewMock()

	// raven
	ts.Update("raven", mock.Now().UnixNano())

	// raven, owl
	mock.Add(time.Millisecond)
	ts.Update("owl", mock.Now().UnixNano())

	// raven, owl, goat
	mock.Add(time.Second)
	ts.Update("goat", mock.Now().UnixNano())

	if err := ts.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	// make owl the most recent
	mock.Add(time.Millisecond)
	ts.Update("owl", mock.Now().UnixNano())
	if err := ts.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	// For LRU timestamps: most recent first. Expected order: owl, goat, raven.
	if ts.K(0) != "owl" {
		t.Fatalf("first key should be owl, have %s", ts.K(0))
	}

	if ts.K(1) != "goat" {
		t.Fatalf("second key should be goat, have %s", ts.K(1))
	}

	if ts.K(2) != "raven" {
		t.Fatalf("third key should be raven, have %s", ts.K(2))
	}
}
178 cache/mru/mru.go vendored Normal file
@@ -0,0 +1,178 @@
package mru

import (
	"errors"
	"fmt"
	"sort"
	"sync"

	"github.com/benbjohnson/clock"
)

type item[V any] struct {
	V      V
	access int64
}

// A Cache is a map that retains a limited number of items. It must be
// initialized with New, providing a maximum capacity for the cache.
// Only the most recently used items are retained.
type Cache[K comparable, V any] struct {
	store  map[K]*item[V]
	access *timestamps[K]
	cap    int
	clock  clock.Clock
	// All public methods that have the possibility of modifying the
	// cache should lock it.
	mtx *sync.Mutex
}

// New must be used to create a new Cache.
func New[K comparable, V any](icap int) *Cache[K, V] {
	return &Cache[K, V]{
		store:  map[K]*item[V]{},
		access: newTimestamps[K](icap),
		cap:    icap,
		clock:  clock.New(),
		mtx:    &sync.Mutex{},
	}
}

// StringKeyCache is a convenience wrapper for cache keyed by string.
type StringKeyCache[V any] struct {
	*Cache[string, V]
}

// NewStringKeyCache creates a new MRU cache keyed by string.
func NewStringKeyCache[V any](icap int) *StringKeyCache[V] {
	return &StringKeyCache[V]{Cache: New[string, V](icap)}
}

func (c *Cache[K, V]) lock() {
	c.mtx.Lock()
}

func (c *Cache[K, V]) unlock() {
	c.mtx.Unlock()
}

// Len returns the number of items currently in the cache.
func (c *Cache[K, V]) Len() int {
	return len(c.store)
}

// evict should remove the least-recently-used cache item.
func (c *Cache[K, V]) evict() {
	if c.access.Len() == 0 {
		return
	}

	k := c.access.K(0)
	c.evictKey(k)
}

// evictKey should remove the entry given by the key item.
func (c *Cache[K, V]) evictKey(k K) {
	delete(c.store, k)
	i, ok := c.access.Find(k)
	if !ok {
		return
	}

	c.access.Delete(i)
}

func (c *Cache[K, V]) sanityCheck() {
	if len(c.store) != c.access.Len() {
		panic(fmt.Sprintf("MRU cache is out of sync; store len = %d, access len = %d",
			len(c.store), c.access.Len()))
	}
}

// ConsistencyCheck runs a series of checks to ensure that the cache's
// data structures are consistent. It is not normally required, and it
// is primarily used in testing.
func (c *Cache[K, V]) ConsistencyCheck() error {
	c.lock()
	defer c.unlock()
	if err := c.access.ConsistencyCheck(); err != nil {
		return err
	}

	if len(c.store) != c.access.Len() {
		return fmt.Errorf("mru: cache is out of sync; store len = %d, access len = %d",
			len(c.store), c.access.Len())
	}

	for i := range c.access.ts {
		itm, ok := c.store[c.access.K(i)]
		if !ok {
			return errors.New("mru: key in access is not in store")
		}

		if c.access.T(i) != itm.access {
			return fmt.Errorf("timestamps are out of sync (%d != %d)",
				itm.access, c.access.T(i))
		}
	}

	if !sort.IsSorted(c.access) {
		return errors.New("mru: timestamps aren't sorted")
	}

	return nil
}

// Store adds the value v to the cache under the k.
func (c *Cache[K, V]) Store(k K, v V) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	if len(c.store) == c.cap {
		c.evict()
	}

	if _, ok := c.store[k]; ok {
		c.evictKey(k)
	}

	itm := &item[V]{
		V:      v,
		access: c.clock.Now().UnixNano(),
	}

	c.store[k] = itm
	c.access.Update(k, itm.access)
}

// Get returns the value stored in the cache. If the item isn't present,
// it will return false.
func (c *Cache[K, V]) Get(k K) (V, bool) {
	c.lock()
	defer c.unlock()

	c.sanityCheck()

	itm, ok := c.store[k]
	if !ok {
		var zero V
		return zero, false
	}

	c.store[k].access = c.clock.Now().UnixNano()
	c.access.Update(k, itm.access)
	return itm.V, true
}

// Has returns true if the cache has an entry for k. It will not update
// the timestamp on the item.
func (c *Cache[K, V]) Has(k K) bool {
	// Don't need to lock as we don't modify anything.

	c.sanityCheck()

	_, ok := c.store[k]
	return ok
}
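The mru package mirrors the lru API; apart from naming and messages, the functional difference is the comparator in timestamps.Less (ascending here, descending in lru), which decides which key sits at index 0 and therefore which entry evict removes. A small fragment sketching the behavior exercised by the test below, assuming the package has been imported as git.wntrmute.dev/kyle/goutils/cache/mru:

```
// With capacity 2, storing a third key evicts the entry at access index 0.
// In this package that is the oldest timestamp, so "raven" is dropped and
// the two most recently touched keys remain.
c := mru.NewStringKeyCache[int](2)
c.Store("raven", 1)
c.Store("owl", 2)
c.Store("goat", 3) // evicts "raven"
_ = c.Has("owl")   // true; Has does not refresh the timestamp
```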
92 cache/mru/mru_internal_test.go vendored Normal file
@@ -0,0 +1,92 @@
package mru

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

func TestBasicCacheEviction(t *testing.T) {
	mock := clock.NewMock()
	c := NewStringKeyCache[int](2)
	c.clock = mock

	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if c.Len() != 0 {
		t.Fatal("cache should have size 0")
	}

	c.evict()
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	c.Store("raven", 1)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 1 {
		t.Fatalf("store should have length=1, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	c.Store("owl", 2)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	c.Store("goat", 3)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if len(c.store) != 2 {
		t.Fatalf("store should have length=2, have length=%d", len(c.store))
	}

	mock.Add(time.Second)
	v, ok := c.Get("owl")
	if !ok {
		t.Fatal("store should have an entry for owl")
	}
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	itm := v
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if itm != 2 {
		t.Fatalf("stored item should be 2, have %d", itm)
	}

	mock.Add(time.Second)
	c.Store("elk", 4)
	if err := c.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	if !c.Has("elk") {
		t.Fatal("store should contain an entry for 'elk'")
	}

	if !c.Has("owl") {
		t.Fatal("store should contain an entry for 'owl'")
	}

	if c.Has("goat") {
		t.Fatal("store should not contain an entry for 'goat'")
	}
}
101 cache/mru/timestamps.go vendored Normal file
@@ -0,0 +1,101 @@
package mru

import (
	"errors"
	"fmt"
	"io"
	"sort"
)

// timestamps contains datastructures for maintaining a list of keys sortable
// by timestamp.

type timestamp[K comparable] struct {
	t int64
	k K
}

type timestamps[K comparable] struct {
	ts  []timestamp[K]
	cap int
}

func newTimestamps[K comparable](icap int) *timestamps[K] {
	return &timestamps[K]{
		ts:  make([]timestamp[K], 0, icap),
		cap: icap,
	}
}

func (ts *timestamps[K]) K(i int) K {
	return ts.ts[i].k
}

func (ts *timestamps[K]) T(i int) int64 {
	return ts.ts[i].t
}

func (ts *timestamps[K]) Len() int {
	return len(ts.ts)
}

func (ts *timestamps[K]) Less(i, j int) bool {
	return ts.ts[i].t < ts.ts[j].t
}

func (ts *timestamps[K]) Swap(i, j int) {
	ts.ts[i], ts.ts[j] = ts.ts[j], ts.ts[i]
}

func (ts *timestamps[K]) Find(k K) (int, bool) {
	for i := range ts.ts {
		if ts.ts[i].k == k {
			return i, true
		}
	}
	return -1, false
}

func (ts *timestamps[K]) Update(k K, t int64) bool {
	i, ok := ts.Find(k)
	if !ok {
		ts.ts = append(ts.ts, timestamp[K]{t, k})
		sort.Sort(ts)
		return false
	}

	ts.ts[i].t = t
	sort.Sort(ts)
	return true
}

func (ts *timestamps[K]) ConsistencyCheck() error {
	if !sort.IsSorted(ts) {
		return errors.New("mru: timestamps are not sorted")
	}

	keys := map[K]bool{}
	for i := range ts.ts {
		if keys[ts.ts[i].k] {
			return fmt.Errorf("duplicate key %v detected", ts.ts[i].k)
		}
		keys[ts.ts[i].k] = true
	}

	if len(keys) != len(ts.ts) {
		return fmt.Errorf("mru: timestamp contains %d duplicate keys",
			len(ts.ts)-len(keys))
	}

	return nil
}

func (ts *timestamps[K]) Delete(i int) {
	ts.ts = append(ts.ts[:i], ts.ts[i+1:]...)
}

func (ts *timestamps[K]) Dump(w io.Writer) {
	for i := range ts.ts {
		fmt.Fprintf(w, "%d: %v, %d\n", i, ts.K(i), ts.T(i))
	}
}
49 cache/mru/timestamps_internal_test.go vendored Normal file
@@ -0,0 +1,49 @@
package mru

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

func TestTimestamps(t *testing.T) {
	ts := newTimestamps[string](3)
	mock := clock.NewMock()

	// raven
	ts.Update("raven", mock.Now().UnixNano())

	// raven, owl
	mock.Add(time.Millisecond)

	ts.Update("owl", mock.Now().UnixNano())

	// raven, owl, goat
	mock.Add(time.Second)
	ts.Update("goat", mock.Now().UnixNano())

	if err := ts.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}
	mock.Add(time.Millisecond)

	// raven, goat, owl
	ts.Update("owl", mock.Now().UnixNano())
	if err := ts.ConsistencyCheck(); err != nil {
		t.Fatal(err)
	}

	// at this point, the keys should be raven, goat, owl.
	if ts.K(0) != "raven" {
		t.Fatalf("first key should be raven, have %s", ts.K(0))
	}

	if ts.K(1) != "goat" {
		t.Fatalf("second key should be goat, have %s", ts.K(1))
	}

	if ts.K(2) != "owl" {
		t.Fatalf("third key should be owl, have %s", ts.K(2))
	}
}
677 certlib/bundler/bundler.go Normal file
@@ -0,0 +1,677 @@
package bundler

import (
	"archive/tar"
	"archive/zip"
	"compress/gzip"
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"gopkg.in/yaml.v2"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

const defaultFileMode = 0644

// Config represents the top-level YAML configuration.
type Config struct {
	Config struct {
		Hashes string `yaml:"hashes"`
		Expiry string `yaml:"expiry"`
	} `yaml:"config"`
	Chains map[string]ChainGroup `yaml:"chains"`
}

// ChainGroup represents a named group of certificate chains.
type ChainGroup struct {
	Certs   []CertChain `yaml:"certs"`
	Outputs Outputs     `yaml:"outputs"`
}

// CertChain represents a root certificate and its intermediates.
type CertChain struct {
	Root          string   `yaml:"root"`
	Intermediates []string `yaml:"intermediates"`
}

// Outputs defines output format options.
type Outputs struct {
	IncludeSingle     bool     `yaml:"include_single"`
	IncludeIndividual bool     `yaml:"include_individual"`
	Manifest          bool     `yaml:"manifest"`
	Formats           []string `yaml:"formats"`
	Encoding          string   `yaml:"encoding"`
}

var formatExtensions = map[string]string{
	"zip": ".zip",
	"tgz": ".tar.gz",
}

// Run performs the bundling operation given a config file path and an output directory.
func Run(configFile string, outputDir string) error {
	if configFile == "" {
		return errors.New("configuration file required")
	}

	cfg, err := loadConfig(configFile)
	if err != nil {
		return fmt.Errorf("loading config: %w", err)
	}

	expiryDuration := 365 * 24 * time.Hour
	if cfg.Config.Expiry != "" {
		expiryDuration, err = parseDuration(cfg.Config.Expiry)
		if err != nil {
			return fmt.Errorf("parsing expiry: %w", err)
		}
	}

	if err = os.MkdirAll(outputDir, 0750); err != nil {
		return fmt.Errorf("creating output directory: %w", err)
	}

	totalFormats := 0
	for _, group := range cfg.Chains {
		totalFormats += len(group.Outputs.Formats)
	}
	createdFiles := make([]string, 0, totalFormats)
	for groupName, group := range cfg.Chains {
		files, perr := processChainGroup(groupName, group, expiryDuration, outputDir)
		if perr != nil {
			return fmt.Errorf("processing chain group %s: %w", groupName, perr)
		}
		createdFiles = append(createdFiles, files...)
	}

	if cfg.Config.Hashes != "" {
		hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
		if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
			return fmt.Errorf("generating hash file: %w", gerr)
		}
	}

	return nil
}

func loadConfig(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}

	var cfg Config
	if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
		return nil, uerr
	}

	return &cfg, nil
}

func parseDuration(s string) (time.Duration, error) {
	// Support simple formats like "1y", "6m", "30d"
	if len(s) < 2 {
		return 0, fmt.Errorf("invalid duration format: %s", s)
	}

	unit := s[len(s)-1]
	value := s[:len(s)-1]

	var multiplier time.Duration
	switch unit {
	case 'y', 'Y':
		multiplier = 365 * 24 * time.Hour
	case 'm', 'M':
		multiplier = 30 * 24 * time.Hour
	case 'd', 'D':
		multiplier = 24 * time.Hour
	default:
		return time.ParseDuration(s)
	}

	var num int
	_, err := fmt.Sscanf(value, "%d", &num)
	if err != nil {
		return 0, fmt.Errorf("invalid duration value: %s", s)
	}

	return time.Duration(num) * multiplier, nil
}

func processChainGroup(
	groupName string,
	group ChainGroup,
	expiryDuration time.Duration,
	outputDir string,
) ([]string, error) {
	// Default encoding to "pem" if not specified
	encoding := group.Outputs.Encoding
	if encoding == "" {
		encoding = "pem"
	}

	// Collect certificates from all chains in the group
	singleFileCerts, individualCerts, sourcePaths, err := loadAndCollectCerts(
		group.Certs,
		group.Outputs,
		expiryDuration,
	)
	if err != nil {
		return nil, err
	}

	// Prepare files for inclusion in archives
	archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, sourcePaths, group.Outputs, encoding)
	if err != nil {
		return nil, err
	}

	// Create archives for the entire group
	createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles, outputDir)
	if err != nil {
		return nil, err
	}

	return createdFiles, nil
}

// loadAndCollectCerts loads all certificates from chains and collects them for processing.
func loadAndCollectCerts(
	chains []CertChain,
	outputs Outputs,
	expiryDuration time.Duration,
) ([]*x509.Certificate, []certWithPath, []string, error) {
	var singleFileCerts []*x509.Certificate
	var individualCerts []certWithPath
	var sourcePaths []string

	for _, chain := range chains {
		s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
		if cerr != nil {
			return nil, nil, nil, cerr
		}
		if len(s) > 0 {
			singleFileCerts = append(singleFileCerts, s...)
		}
		if len(i) > 0 {
			individualCerts = append(individualCerts, i...)
		}
		// Record source paths for timestamp preservation
		// Only append when loading succeeded
		sourcePaths = append(sourcePaths, chain.Root)
		sourcePaths = append(sourcePaths, chain.Intermediates...)
	}

	return singleFileCerts, individualCerts, sourcePaths, nil
}

// collectFromChain loads a single chain, performs checks, and returns the certs to include.
func collectFromChain(
	chain CertChain,
	outputs Outputs,
	expiryDuration time.Duration,
) (
	[]*x509.Certificate,
	[]certWithPath,
	error,
) {
	var single []*x509.Certificate
	var indiv []certWithPath

	// Load root certificate
	rootCert, rerr := certlib.LoadCertificate(chain.Root)
	if rerr != nil {
		return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
	}

	// Check expiry for root
	checkExpiry(chain.Root, rootCert, expiryDuration)

	// Add root to collections if needed
	if outputs.IncludeSingle {
		single = append(single, rootCert)
	}
	if outputs.IncludeIndividual {
		indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
	}

	// Load and validate intermediates
	for _, intPath := range chain.Intermediates {
		intCert, lerr := certlib.LoadCertificate(intPath)
		if lerr != nil {
			return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
		}

		// Validate that intermediate is signed by root
		if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
			return nil, nil, fmt.Errorf(
				"intermediate %s is not properly signed by root %s: %w",
				intPath,
				chain.Root,
				sigErr,
			)
		}

		// Check expiry for intermediate
		checkExpiry(intPath, intCert, expiryDuration)

		// Add intermediate to collections if needed
		if outputs.IncludeSingle {
			single = append(single, intCert)
		}
		if outputs.IncludeIndividual {
			indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
		}
	}

	return single, indiv, nil
}

// prepareArchiveFiles prepares all files to be included in archives.
func prepareArchiveFiles(
	singleFileCerts []*x509.Certificate,
	individualCerts []certWithPath,
	sourcePaths []string,
	outputs Outputs,
	encoding string,
) ([]fileEntry, error) {
	var archiveFiles []fileEntry

	// Track used filenames to avoid collisions inside archives
	usedNames := make(map[string]int)

	// Handle a single bundle file
	if outputs.IncludeSingle && len(singleFileCerts) > 0 {
		bundleTime := maxModTime(sourcePaths)
		files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
		if err != nil {
			return nil, fmt.Errorf("failed to encode single bundle: %w", err)
		}
		for i := range files {
			files[i].name = makeUniqueName(files[i].name, usedNames)
			files[i].modTime = bundleTime
			// Best-effort: we do not have a portable birth/creation time.
			// Use the same timestamp for created time to track deterministically.
			files[i].createTime = bundleTime
		}
		archiveFiles = append(archiveFiles, files...)
	}

	// Handle individual files
	if outputs.IncludeIndividual {
		for _, cp := range individualCerts {
			baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
			files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
			if err != nil {
				return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
			}
			mt := fileModTime(cp.path)
			for i := range files {
				files[i].name = makeUniqueName(files[i].name, usedNames)
				files[i].modTime = mt
				files[i].createTime = mt
			}
			archiveFiles = append(archiveFiles, files...)
		}
	}

	// Generate manifest if requested
	if outputs.Manifest {
		manifestContent := generateManifest(archiveFiles)
		manifestName := makeUniqueName("MANIFEST", usedNames)
		mt := maxModTime(sourcePaths)
		archiveFiles = append(archiveFiles, fileEntry{
			name:       manifestName,
			content:    manifestContent,
			modTime:    mt,
			createTime: mt,
		})
	}

	return archiveFiles, nil
}

// createArchiveFiles creates archive files in the specified formats.
func createArchiveFiles(
	groupName string,
	formats []string,
	archiveFiles []fileEntry,
	outputDir string,
) ([]string, error) {
	createdFiles := make([]string, 0, len(formats))

	for _, format := range formats {
		ext, ok := formatExtensions[format]
		if !ok {
			return nil, fmt.Errorf("unsupported format: %s", format)
		}
		archivePath := filepath.Join(outputDir, groupName+ext)
		switch format {
		case "zip":
			if err := createZipArchive(archivePath, archiveFiles); err != nil {
				return nil, fmt.Errorf("failed to create zip archive: %w", err)
			}
		case "tgz":
			if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
				return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
			}
		default:
			return nil, fmt.Errorf("unsupported format: %s", format)
		}
		createdFiles = append(createdFiles, archivePath)
	}

	return createdFiles, nil
}

func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
	now := time.Now()
	expiryThreshold := now.Add(expiryDuration)

	if cert.NotAfter.Before(expiryThreshold) {
		daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
		if daysUntilExpiry < 0 {
			fmt.Fprintf(
				os.Stderr,
				"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
				path,
				-daysUntilExpiry,
			)
		} else {
			fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
		}
	}
}

type fileEntry struct {
	name       string
	content    []byte
	modTime    time.Time
	createTime time.Time
}

type certWithPath struct {
	cert *x509.Certificate
	path string
}

// encodeCertsToFiles converts certificates to file entries based on encoding type
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
func encodeCertsToFiles(
	certs []*x509.Certificate,
	baseName string,
	encoding string,
	isSingle bool,
) ([]fileEntry, error) {
	var files []fileEntry

	switch encoding {
	case "pem":
		pemContent := encodeCertsToPEM(certs)
		files = append(files, fileEntry{
			name:    baseName + ".pem",
			content: pemContent,
		})
	case "der":
		if isSingle {
			// For single file in DER, concatenate all cert DER bytes
			var derContent []byte
			for _, cert := range certs {
				derContent = append(derContent, cert.Raw...)
			}
			files = append(files, fileEntry{
				name:    baseName + ".crt",
				content: derContent,
			})
		} else if len(certs) > 0 {
			// Individual DER file (should only have one cert)
			files = append(files, fileEntry{
				name:    baseName + ".crt",
				content: certs[0].Raw,
			})
		}
	case "both":
		// Add PEM version
		pemContent := encodeCertsToPEM(certs)
		files = append(files, fileEntry{
			name:    baseName + ".pem",
			content: pemContent,
		})
		// Add DER version
		if isSingle {
			var derContent []byte
			for _, cert := range certs {
				derContent = append(derContent, cert.Raw...)
			}
			files = append(files, fileEntry{
				name:    baseName + ".crt",
				content: derContent,
			})
		} else if len(certs) > 0 {
			files = append(files, fileEntry{
				name:    baseName + ".crt",
				content: certs[0].Raw,
			})
		}
	default:
		return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
	}

	return files, nil
}

// encodeCertsToPEM encodes certificates to PEM format.
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
	var pemContent []byte
	for _, cert := range certs {
		pemBlock := &pem.Block{
			Type:  "CERTIFICATE",
			Bytes: cert.Raw,
		}
		pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
	}
	return pemContent
}

func generateManifest(files []fileEntry) []byte {
	// Build a sorted list of files by filename to ensure deterministic manifest ordering
	sorted := make([]fileEntry, 0, len(files))
	for _, f := range files {
		// Defensive: skip any existing manifest entry
		if f.name == "MANIFEST" {
			continue
		}
		sorted = append(sorted, f)
	}
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].name < sorted[j].name })

	var manifest strings.Builder
	for _, file := range sorted {
		hash := sha256.Sum256(file.content)
		manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
	}
	return []byte(manifest.String())
}

// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
func closeWithErr(baseErr error, closers ...io.Closer) error {
	for _, c := range closers {
		if c == nil {
			continue
		}
		if cerr := c.Close(); cerr != nil {
			baseErr = errors.Join(baseErr, cerr)
		}
	}
	return baseErr
}

func createZipArchive(path string, files []fileEntry) error {
	f, zerr := os.Create(path)
	if zerr != nil {
		return zerr
	}

	w := zip.NewWriter(f)

	for _, file := range files {
		hdr := &zip.FileHeader{
			Name:   file.name,
			Method: zip.Deflate,
		}
		if !file.modTime.IsZero() {
			hdr.SetModTime(file.modTime)
		}
		fw, werr := w.CreateHeader(hdr)
		if werr != nil {
			return closeWithErr(werr, w, f)
		}
		if _, werr = fw.Write(file.content); werr != nil {
			return closeWithErr(werr, w, f)
		}
	}

	// Check errors on close operations
	if cerr := w.Close(); cerr != nil {
		_ = f.Close()
		return cerr
	}
	return f.Close()
}

func createTarGzArchive(path string, files []fileEntry) error {
	f, terr := os.Create(path)
	if terr != nil {
		return terr
	}

	gw := gzip.NewWriter(f)
	tw := tar.NewWriter(gw)

	for _, file := range files {
		hdr := &tar.Header{
			Name: file.name,
			Uid:  0,
			Gid:  0,
			Mode: defaultFileMode,
			Size: int64(len(file.content)),
			ModTime: func() time.Time {
				if file.modTime.IsZero() {
					return time.Now()
				}
				return file.modTime
			}(),
		}
		// Set additional times if supported
		hdr.AccessTime = hdr.ModTime
		if !file.createTime.IsZero() {
			hdr.ChangeTime = file.createTime
		} else {
			hdr.ChangeTime = hdr.ModTime
		}
		if herr := tw.WriteHeader(hdr); herr != nil {
			return closeWithErr(herr, tw, gw, f)
		}
		if _, werr := tw.Write(file.content); werr != nil {
			return closeWithErr(werr, tw, gw, f)
		}
	}

	// Check errors on close operations in the correct order
	if cerr := tw.Close(); cerr != nil {
		_ = gw.Close()
		_ = f.Close()
		return cerr
	}
	if cerr := gw.Close(); cerr != nil {
		_ = f.Close()
		return cerr
	}
	return f.Close()
}

func generateHashFile(path string, files []string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	for _, file := range files {
		data, rerr := os.ReadFile(file)
		if rerr != nil {
			return rerr
		}

		hash := sha256.Sum256(data)
		fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
	}

	return nil
}

// makeUniqueName ensures that each file name within the archive is unique by appending
// an incremental numeric suffix before the extension when collisions occur.
// Example: "root.pem" -> "root-2.pem", "root-3.pem", etc.
func makeUniqueName(name string, used map[string]int) string {
	// If unused, mark and return as-is
	if _, ok := used[name]; !ok {
		used[name] = 1
		return name
	}

	ext := filepath.Ext(name)
	base := strings.TrimSuffix(name, ext)
	// Track a counter per base+ext key
	key := base + ext
	counter := max(used[key], 1)
	for {
		counter++
		candidate := fmt.Sprintf("%s-%d%s", base, counter, ext)
		if _, exists := used[candidate]; !exists {
			used[key] = counter
			used[candidate] = 1
			return candidate
		}
	}
}

// fileModTime returns the file's modification time, or time.Now() if stat fails.
func fileModTime(path string) time.Time {
	fi, err := os.Stat(path)
	if err != nil {
		return time.Now()
	}
	return fi.ModTime()
}

// maxModTime returns the latest modification time across provided paths.
// If the list is empty or stats fail, returns time.Now().
func maxModTime(paths []string) time.Time {
	var zero time.Time
	maxTime := zero
	for _, p := range paths {
		fi, err := os.Stat(p)
		if err != nil {
			continue
		}
		mt := fi.ModTime()
		if maxTime.IsZero() || mt.After(maxTime) {
			maxTime = mt
		}
	}
	if maxTime.IsZero() {
		return time.Now()
	}
	return maxTime
}
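A sketch of driving the bundler from a main package, assuming a certs.yaml laid out per the Config, ChainGroup, and Outputs structs above; the file name and output directory here are placeholders.

```
package main

import (
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib/bundler"
)

func main() {
	// Run loads the YAML config, validates and bundles each chain group,
	// and writes the archives (plus the optional hash file) into the
	// output directory.
	if err := bundler.Run("certs.yaml", "bundle"); err != nil {
		log.Fatal(err)
	}
}
```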
33 certlib/certerr/doc.go Normal file
@@ -0,0 +1,33 @@
// Package certerr provides typed errors and helpers for certificate-related
// operations across the repository. It standardizes error construction and
// matching so callers can reliably branch on error source/kind using the
// Go 1.13+ `errors.Is` and `errors.As` helpers.
//
// Guidelines
//   - Always wrap underlying causes using the helper constructors or with
//     fmt.Errorf("context: %w", err).
//   - Do not include sensitive data (keys, passwords, tokens) in error
//     messages; add only non-sensitive, actionable context.
//   - Prefer programmatic checks via errors.Is (for sentinel errors) and
//     errors.As (to retrieve *certerr.Error) rather than relying on error
//     string contents.
//
// Typical usage
//
//	if err := doParse(); err != nil {
//	    return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
//	}
//
// Callers may branch on error kinds and sources:
//
//	var e *certerr.Error
//	if errors.As(err, &e) {
//	    switch e.Kind {
//	    case certerr.KindParse:
//	        // handle parse error
//	    }
//	}
//
// Sentinel errors are provided for common conditions like
// `certerr.ErrEncryptedPrivateKey` and can be matched with `errors.Is`.
package certerr
@@ -37,43 +37,84 @@ const (
 	ErrorSourceKeypair ErrorSourceType = 5
 )
 
-// InvalidPEMType is used to indicate that we were expecting one type of PEM
+// ErrorKind is a broad classification describing what went wrong.
+type ErrorKind uint8
+
+const (
+	KindParse ErrorKind = iota + 1
+	KindDecode
+	KindVerify
+	KindLoad
+)
+
+func (k ErrorKind) String() string {
+	switch k {
+	case KindParse:
+		return "parse"
+	case KindDecode:
+		return "decode"
+	case KindVerify:
+		return "verify"
+	case KindLoad:
+		return "load"
+	default:
+		return "unknown"
+	}
+}
+
+// Error is a typed, wrapped error with structured context for programmatic checks.
+// It implements error and supports errors.Is/As via Unwrap.
+type Error struct {
+	Source ErrorSourceType // which domain produced the error (certificate, private key, etc.)
+	Kind   ErrorKind       // operation category (parse, decode, verify, load)
+	Op     string          // optional operation or function name
+	Err    error           // wrapped cause
+}
+
+func (e *Error) Error() string {
+	// Keep message format consistent with existing helpers: "failed to <kind> <source>: <err>"
+	// Do not include Op by default to preserve existing output expectations.
+	return fmt.Sprintf("failed to %s %s: %v", e.Kind.String(), e.Source.String(), e.Err)
+}
+
+func (e *Error) Unwrap() error { return e.Err }
+
+// InvalidPEMTypeError is used to indicate that we were expecting one type of PEM
 // file, but saw another.
-type InvalidPEMType struct {
+type InvalidPEMTypeError struct {
 	have string
 	want []string
 }
 
-func (err *InvalidPEMType) Error() string {
+func (err *InvalidPEMTypeError) Error() string {
 	if len(err.want) == 1 {
 		return fmt.Sprintf("invalid PEM type: have %s, expected %s", err.have, err.want[0])
-	} else {
-		return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
 	}
+
+	return fmt.Sprintf("invalid PEM type: have %s, expected one of %s", err.have, strings.Join(err.want, ", "))
 }
 
-// ErrInvalidPEMType returns a new InvalidPEMType error.
+// ErrInvalidPEMType returns a new InvalidPEMTypeError error.
 func ErrInvalidPEMType(have string, want ...string) error {
-	return &InvalidPEMType{
+	return &InvalidPEMTypeError{
 		have: have,
 		want: want,
 	}
 }
 
 func LoadingError(t ErrorSourceType, err error) error {
-	return fmt.Errorf("failed to load %s from disk: %w", t, err)
+	return &Error{Source: t, Kind: KindLoad, Err: err}
 }
 
 func ParsingError(t ErrorSourceType, err error) error {
-	return fmt.Errorf("failed to parse %s: %w", t, err)
+	return &Error{Source: t, Kind: KindParse, Err: err}
 }
 
 func DecodeError(t ErrorSourceType, err error) error {
-	return fmt.Errorf("failed to decode %s: %w", t, err)
+	return &Error{Source: t, Kind: KindDecode, Err: err}
 }
 
 func VerifyError(t ErrorSourceType, err error) error {
-	return fmt.Errorf("failed to verify %s: %w", t, err)
+	return &Error{Source: t, Kind: KindVerify, Err: err}
 }
 
 var ErrEncryptedPrivateKey = errors.New("private key is encrypted")
56 certlib/certerr/errors_test.go Normal file
@@ -0,0 +1,56 @@
//nolint:testpackage // keep tests in the same package for internal symbol access
package certerr

import (
	"errors"
	"strings"
	"testing"
)

func TestTypedErrorWrappingAndFormatting(t *testing.T) {
	cause := errors.New("bad data")
	err := DecodeError(ErrorSourceCertificate, cause)

	// Ensure we can retrieve the typed error
	var e *Error
	if !errors.As(err, &e) {
		t.Fatalf("expected errors.As to retrieve *certerr.Error, got %T", err)
	}
	if e.Kind != KindDecode {
		t.Fatalf("unexpected kind: %v", e.Kind)
	}
	if e.Source != ErrorSourceCertificate {
		t.Fatalf("unexpected source: %v", e.Source)
	}

	// Check message format (no trailing punctuation enforced by content)
	msg := e.Error()
	if !strings.Contains(msg, "failed to decode certificate") || !strings.Contains(msg, "bad data") {
		t.Fatalf("unexpected error message: %q", msg)
	}
}

func TestErrorsIsOnWrappedSentinel(t *testing.T) {
	err := DecodeError(ErrorSourcePrivateKey, ErrEncryptedPrivateKey)
	if !errors.Is(err, ErrEncryptedPrivateKey) {
		t.Fatalf("expected errors.Is to match ErrEncryptedPrivateKey")
	}
}

func TestInvalidPEMTypeMessageSingle(t *testing.T) {
	err := ErrInvalidPEMType("FOO", "CERTIFICATE")
	want := "invalid PEM type: have FOO, expected CERTIFICATE"
	if err.Error() != want {
		t.Fatalf("unexpected error message: got %q, want %q", err.Error(), want)
	}
}

func TestInvalidPEMTypeMessageMultiple(t *testing.T) {
	err := ErrInvalidPEMType("FOO", "CERTIFICATE", "NEW CERTIFICATE REQUEST")
	if !strings.Contains(
		err.Error(),
		"invalid PEM type: have FOO, expected one of CERTIFICATE, NEW CERTIFICATE REQUEST",
	) {
		t.Fatalf("unexpected error message: %q", err.Error())
	}
}
@@ -4,43 +4,53 @@ import (
    "crypto/x509"
    "encoding/pem"
    "errors"
-    "io/ioutil"
+    "os"

    "git.wntrmute.dev/kyle/goutils/certlib/certerr"
)

// ReadCertificate reads a DER or PEM-encoded certificate from the
// byte slice.
-func ReadCertificate(in []byte) (cert *x509.Certificate, rest []byte, err error) {
+func ReadCertificate(in []byte) (*x509.Certificate, []byte, error) {
    if len(in) == 0 {
-        err = certerr.ErrEmptyCertificate
-        return
+        return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, certerr.ErrEmptyCertificate)
    }

    if in[0] == '-' {
        p, remaining := pem.Decode(in)
        if p == nil {
-            err = errors.New("certlib: invalid PEM file")
-            return
+            return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("invalid PEM file"))
        }

-        rest = remaining
+        rest := remaining
        if p.Type != "CERTIFICATE" {
-            err = certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE")
-            return
+            return nil, rest, certerr.ParsingError(
+                certerr.ErrorSourceCertificate,
+                certerr.ErrInvalidPEMType(p.Type, "CERTIFICATE"),
+            )
        }

        in = p.Bytes
+        cert, err := x509.ParseCertificate(in)
+        if err != nil {
+            return nil, rest, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
+        }
+        return cert, rest, nil
    }

-    cert, err = x509.ParseCertificate(in)
-    return
+    cert, err := x509.ParseCertificate(in)
+    if err != nil {
+        return nil, nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
+    }
+    return cert, nil, nil
}

// ReadCertificates tries to read all the certificates in a
// PEM-encoded collection.
-func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
+func ReadCertificates(in []byte) ([]*x509.Certificate, error) {
    var cert *x509.Certificate
+    var certs []*x509.Certificate
+    var err error
    for {
        cert, in, err = ReadCertificate(in)
        if err != nil {
@@ -64,9 +74,9 @@ func ReadCertificates(in []byte) (certs []*x509.Certificate, err error) {
// the file contains multiple certificates (e.g. a chain), only the
// first certificate is returned.
func LoadCertificate(path string) (*x509.Certificate, error) {
-    in, err := ioutil.ReadFile(path)
+    in, err := os.ReadFile(path)
    if err != nil {
-        return nil, err
+        return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, err)
    }

    cert, _, err := ReadCertificate(in)
@@ -76,9 +86,9 @@ func LoadCertificate(path string) (*x509.Certificate, error) {
// LoadCertificates tries to read all the certificates in a file,
// returning them in the order that it found them in the file.
func LoadCertificates(path string) ([]*x509.Certificate, error) {
-    in, err := ioutil.ReadFile(path)
+    in, err := os.ReadFile(path)
    if err != nil {
-        return nil, err
+        return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, err)
    }

    return ReadCertificates(in)
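A minimal usage sketch (not part of this changeset) for the reworked loaders above, assuming these functions live in the certlib package at git.wntrmute.dev/kyle/goutils/certlib; the bundle path is a placeholder.

package main

import (
    "fmt"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    // LoadCertificates reads every certificate in a PEM bundle, in file order.
    certs, err := certlib.LoadCertificates("chain.pem") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    for _, cert := range certs {
        fmt.Println(cert.Subject.CommonName)
    }
}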
@@ -1,3 +1,4 @@
+//nolint:testpackage // keep tests in the same package for internal symbol access
package certlib

import (
@@ -38,6 +38,7 @@ import (
    "crypto/ed25519"
    "crypto/rsa"
    "crypto/x509"
+    "errors"
    "fmt"

    "git.wntrmute.dev/kyle/goutils/certlib/certerr"
@@ -47,29 +48,36 @@ import (
// private key. The key must not be in PEM format. If an error is returned, it
// may contain information about the private key, so care should be taken when
// displaying it directly.
-func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
-    generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
-    if err != nil {
-        generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
-        if err != nil {
-            generalKey, err = x509.ParseECPrivateKey(keyDER)
-            if err != nil {
-                generalKey, err = ParseEd25519PrivateKey(keyDER)
-                if err != nil {
-                    return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
-                }
-            }
-        }
-    }
-
-    switch generalKey := generalKey.(type) {
-    case *rsa.PrivateKey:
-        return generalKey, nil
-    case *ecdsa.PrivateKey:
-        return generalKey, nil
-    case ed25519.PrivateKey:
-        return generalKey, nil
-    default:
-        return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %t", generalKey))
-    }
+func ParsePrivateKeyDER(keyDER []byte) (crypto.Signer, error) {
+    // Try common encodings in order without deep nesting.
+    if k, err := x509.ParsePKCS8PrivateKey(keyDER); err == nil {
+        switch kk := k.(type) {
+        case *rsa.PrivateKey:
+            return kk, nil
+        case *ecdsa.PrivateKey:
+            return kk, nil
+        case ed25519.PrivateKey:
+            return kk, nil
+        default:
+            return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
+        }
+    }
+    if k, err := x509.ParsePKCS1PrivateKey(keyDER); err == nil {
+        return k, nil
+    }
+    if k, err := x509.ParseECPrivateKey(keyDER); err == nil {
+        return k, nil
+    }
+    if k, err := ParseEd25519PrivateKey(keyDER); err == nil {
+        if kk, ok := k.(ed25519.PrivateKey); ok {
+            return kk, nil
+        }
+        return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, fmt.Errorf("unknown key type %T", k))
+    }
+    // If all parsers failed, return the last error from Ed25519 attempt (approximate cause).
+    if _, err := ParseEd25519PrivateKey(keyDER); err != nil {
+        return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, err)
+    }
+    // Fallback (should be unreachable)
+    return nil, certerr.ParsingError(certerr.ErrorSourcePrivateKey, errors.New("unknown key encoding"))
}
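A brief sketch (not part of this changeset) of driving the flattened ParsePrivateKeyDER above, assuming the certlib import path used elsewhere in this diff; the key file is a placeholder and must hold a DER-encoded key, not PEM.

package main

import (
    "log"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    der, err := os.ReadFile("key.der") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    signer, err := certlib.ParsePrivateKeyDER(der)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("parsed a %T private key", signer)
}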
339 certlib/dump/dump.go Normal file
@@ -0,0 +1,339 @@
// Package dump implements tooling for dumping certificate information.
package dump

import (
    "crypto/dsa"
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rsa"
    "crypto/sha256"
    "crypto/x509"
    "crypto/x509/pkix"
    "fmt"
    "io"
    "os"
    "sort"
    "strings"

    "github.com/kr/text"

    "git.wntrmute.dev/kyle/goutils/lib"
)

const (
    sSHA256 = "SHA256"
    sSHA512 = "SHA512"
)

var keyUsage = map[x509.KeyUsage]string{
    x509.KeyUsageDigitalSignature: "digital signature",
    x509.KeyUsageContentCommitment: "content commitment",
    x509.KeyUsageKeyEncipherment: "key encipherment",
    x509.KeyUsageKeyAgreement: "key agreement",
    x509.KeyUsageDataEncipherment: "data encipherment",
    x509.KeyUsageCertSign: "cert sign",
    x509.KeyUsageCRLSign: "crl sign",
    x509.KeyUsageEncipherOnly: "encipher only",
    x509.KeyUsageDecipherOnly: "decipher only",
}

var extKeyUsages = map[x509.ExtKeyUsage]string{
    x509.ExtKeyUsageAny: "any",
    x509.ExtKeyUsageServerAuth: "server auth",
    x509.ExtKeyUsageClientAuth: "client auth",
    x509.ExtKeyUsageCodeSigning: "code signing",
    x509.ExtKeyUsageEmailProtection: "s/mime",
    x509.ExtKeyUsageIPSECEndSystem: "ipsec end system",
    x509.ExtKeyUsageIPSECTunnel: "ipsec tunnel",
    x509.ExtKeyUsageIPSECUser: "ipsec user",
    x509.ExtKeyUsageTimeStamping: "timestamping",
    x509.ExtKeyUsageOCSPSigning: "ocsp signing",
    x509.ExtKeyUsageMicrosoftServerGatedCrypto: "microsoft sgc",
    x509.ExtKeyUsageNetscapeServerGatedCrypto: "netscape sgc",
    x509.ExtKeyUsageMicrosoftCommercialCodeSigning: "microsoft commercial code signing",
    x509.ExtKeyUsageMicrosoftKernelCodeSigning: "microsoft kernel code signing",
}

func sigAlgoPK(a x509.SignatureAlgorithm) string {
    switch a {
    case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
        return "RSA"
    case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS:
        return "RSA-PSS"
    case x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
        return "ECDSA"
    case x509.DSAWithSHA1, x509.DSAWithSHA256:
        return "DSA"
    case x509.PureEd25519:
        return "Ed25519"
    case x509.UnknownSignatureAlgorithm:
        return "unknown public key algorithm"
    default:
        return "unknown public key algorithm"
    }
}

func sigAlgoHash(a x509.SignatureAlgorithm) string {
    switch a {
    case x509.MD2WithRSA:
        return "MD2"
    case x509.MD5WithRSA:
        return "MD5"
    case x509.SHA1WithRSA, x509.ECDSAWithSHA1, x509.DSAWithSHA1:
        return "SHA1"
    case x509.SHA256WithRSA, x509.ECDSAWithSHA256, x509.DSAWithSHA256:
        return sSHA256
    case x509.SHA256WithRSAPSS:
        return sSHA256
    case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
        return "SHA384"
    case x509.SHA384WithRSAPSS:
        return "SHA384"
    case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
        return sSHA512
    case x509.SHA512WithRSAPSS:
        return sSHA512
    case x509.PureEd25519:
        return sSHA512
    case x509.UnknownSignatureAlgorithm:
        return "unknown hash algorithm"
    default:
        return "unknown hash algorithm"
    }
}

const maxLine = 78

func makeIndent(n int) string {
    s := " "
    var sSb97 strings.Builder
    for range n {
        sSb97.WriteString(" ")
    }
    s += sSb97.String()
    return s
}

func indentLen(n int) int {
    return 4 + (8 * n)
}

// this isn't real efficient, but that's not a problem here.
func wrap(s string, indent int) string {
    if indent > 3 {
        indent = 3
    }

    wrapped := text.Wrap(s, maxLine)
    lines := strings.SplitN(wrapped, "\n", 2)
    if len(lines) == 1 {
        return lines[0]
    }

    if (maxLine - indentLen(indent)) <= 0 {
        panic("too much indentation")
    }

    rest := strings.Join(lines[1:], " ")
    wrapped = text.Wrap(rest, maxLine-indentLen(indent))
    return lines[0] + "\n" + text.Indent(wrapped, makeIndent(indent))
}

func dumpHex(in []byte) string {
    return lib.HexEncode(in, lib.HexEncodeUpperColon)
}

func certPublic(cert *x509.Certificate) string {
    switch pub := cert.PublicKey.(type) {
    case *rsa.PublicKey:
        return fmt.Sprintf("RSA-%d", pub.N.BitLen())
    case *ecdsa.PublicKey:
        switch pub.Curve {
        case elliptic.P256():
            return "ECDSA-prime256v1"
        case elliptic.P384():
            return "ECDSA-secp384r1"
        case elliptic.P521():
            return "ECDSA-secp521r1"
        default:
            return "ECDSA (unknown curve)"
        }
    case *dsa.PublicKey:
        return "DSA"
    default:
        return "Unknown"
    }
}

func DisplayName(name pkix.Name) string {
    var ns []string

    if name.CommonName != "" {
        ns = append(ns, name.CommonName)
    }

    for i := range name.Country {
        ns = append(ns, fmt.Sprintf("C=%s", name.Country[i]))
    }

    for i := range name.Organization {
        ns = append(ns, fmt.Sprintf("O=%s", name.Organization[i]))
    }

    for i := range name.OrganizationalUnit {
        ns = append(ns, fmt.Sprintf("OU=%s", name.OrganizationalUnit[i]))
    }

    for i := range name.Locality {
        ns = append(ns, fmt.Sprintf("L=%s", name.Locality[i]))
    }

    for i := range name.Province {
        ns = append(ns, fmt.Sprintf("ST=%s", name.Province[i]))
    }

    if len(ns) > 0 {
        return "/" + strings.Join(ns, "/")
    }

    return "*** no subject information ***"
}

func keyUsages(ku x509.KeyUsage) string {
    var uses []string

    for u, s := range keyUsage {
        if (ku & u) != 0 {
            uses = append(uses, s)
        }
    }
    sort.Strings(uses)

    return strings.Join(uses, ", ")
}

func extUsage(ext []x509.ExtKeyUsage) string {
    ns := make([]string, 0, len(ext))
    for i := range ext {
        ns = append(ns, extKeyUsages[ext[i]])
    }
    sort.Strings(ns)

    return strings.Join(ns, ", ")
}

func showBasicConstraints(cert *x509.Certificate) {
    fmt.Fprint(os.Stdout, "\tBasic constraints: ")
    if cert.BasicConstraintsValid {
        fmt.Fprint(os.Stdout, "valid")
    } else {
        fmt.Fprint(os.Stdout, "invalid")
    }

    if cert.IsCA {
        fmt.Fprint(os.Stdout, ", is a CA certificate")
        if !cert.BasicConstraintsValid {
            fmt.Fprint(os.Stdout, " (basic constraint failure)")
        }
    } else {
        fmt.Fprint(os.Stdout, ", is not a CA certificate")
        if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
            fmt.Fprint(os.Stdout, " (key encipherment usage enabled!)")
        }
    }

    if (cert.MaxPathLen == 0 && cert.MaxPathLenZero) || (cert.MaxPathLen > 0) {
        fmt.Fprintf(os.Stdout, ", max path length %d", cert.MaxPathLen)
    }

    fmt.Fprintln(os.Stdout)
}

var (
    dateFormat string
    showHash   bool // if true, print a SHA256 hash of the certificate's Raw field
)

func wrapPrint(text string, indent int) {
    tabs := ""
    var tabsSb140 strings.Builder
    for range indent {
        tabsSb140.WriteString("\t")
    }
    tabs += tabsSb140.String()

    fmt.Fprintf(os.Stdout, tabs+"%s\n", wrap(text, indent))
}

func DisplayCert(w io.Writer, cert *x509.Certificate) {
    fmt.Fprintln(w, "CERTIFICATE")
    if showHash {
        fmt.Fprintln(w, wrap(fmt.Sprintf("SHA256: %x", sha256.Sum256(cert.Raw)), 0))
    }
    fmt.Fprintln(w, wrap("Subject: "+DisplayName(cert.Subject), 0))
    fmt.Fprintln(w, wrap("Issuer: "+DisplayName(cert.Issuer), 0))
    fmt.Fprintf(w, "\tSignature algorithm: %s / %s\n", sigAlgoPK(cert.SignatureAlgorithm),
        sigAlgoHash(cert.SignatureAlgorithm))
    fmt.Fprintln(w, "Details:")
    wrapPrint("Public key: "+certPublic(cert), 1)
    fmt.Fprintf(w, "\tSerial number: %s\n", cert.SerialNumber)

    if len(cert.AuthorityKeyId) > 0 {
        fmt.Fprintf(w, "\t%s\n", wrap("AKI: "+dumpHex(cert.AuthorityKeyId), 1))
    }
    if len(cert.SubjectKeyId) > 0 {
        fmt.Fprintf(w, "\t%s\n", wrap("SKI: "+dumpHex(cert.SubjectKeyId), 1))
    }

    wrapPrint("Valid from: "+cert.NotBefore.Format(dateFormat), 1)
    fmt.Fprintf(w, "\t until: %s\n", cert.NotAfter.Format(dateFormat))
    fmt.Fprintf(w, "\tKey usages: %s\n", keyUsages(cert.KeyUsage))

    if len(cert.ExtKeyUsage) > 0 {
        fmt.Fprintf(w, "\tExtended usages: %s\n", extUsage(cert.ExtKeyUsage))
    }

    showBasicConstraints(cert)

    validNames := make([]string, 0, len(cert.DNSNames)+len(cert.EmailAddresses)+len(cert.IPAddresses))
    for i := range cert.DNSNames {
        validNames = append(validNames, "dns:"+cert.DNSNames[i])
    }

    for i := range cert.EmailAddresses {
        validNames = append(validNames, "email:"+cert.EmailAddresses[i])
    }

    for i := range cert.IPAddresses {
        validNames = append(validNames, "ip:"+cert.IPAddresses[i].String())
    }

    sans := fmt.Sprintf("SANs (%d): %s\n", len(validNames), strings.Join(validNames, ", "))
    wrapPrint(sans, 1)

    l := len(cert.IssuingCertificateURL)
    if l != 0 {
        var aia string
        if l == 1 {
            aia = "AIA"
        } else {
            aia = "AIAs"
        }
        wrapPrint(fmt.Sprintf("%d %s:", l, aia), 1)
        for _, url := range cert.IssuingCertificateURL {
            wrapPrint(url, 2)
        }
    }

    l = len(cert.OCSPServer)
    if l > 0 {
        title := "OCSP server"
        if l > 1 {
            title += "s"
        }
        wrapPrint(title+":\n", 1)
        for _, ocspServer := range cert.OCSPServer {
            wrapPrint(fmt.Sprintf("- %s\n", ocspServer), 2)
        }
    }
}
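A sketch (not part of this changeset) of calling into the new dump package, assuming the import path git.wntrmute.dev/kyle/goutils/certlib/dump implied by the file location; the certificate path is a placeholder, and note that the package keeps its date format and hash toggle as internal variables.

package main

import (
    "log"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib"
    "git.wntrmute.dev/kyle/goutils/certlib/dump"
)

func main() {
    cert, err := certlib.LoadCertificate("server.pem") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    // Write a human-readable summary of the certificate to stdout.
    dump.DisplayCert(os.Stdout, cert)
}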
@@ -65,12 +65,14 @@ func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
        return nil, errEd25519WrongKeyType
    }

+    const bitsPerByte = 8
+
    spki := subjectPublicKeyInfo{
        Algorithm: pkix.AlgorithmIdentifier{
            Algorithm: ed25519OID,
        },
        PublicKey: asn1.BitString{
-            BitLength: len(pub) * 8,
+            BitLength: len(pub) * bitsPerByte,
            Bytes: pub,
        },
    }
@@ -91,7 +93,8 @@ func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
        return nil, errEd25519WrongID
    }

-    if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
+    const bitsPerByte = 8
+    if spki.PublicKey.BitLength != ed25519.PublicKeySize*bitsPerByte {
        return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
    }

@@ -49,14 +49,14 @@ import (
    "strings"
    "time"

-    "git.wntrmute.dev/kyle/goutils/certlib/certerr"
-    "git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
-
    ct "github.com/google/certificate-transparency-go"
    cttls "github.com/google/certificate-transparency-go/tls"
    ctx509 "github.com/google/certificate-transparency-go/x509"
    "golang.org/x/crypto/ocsp"
    "golang.org/x/crypto/pkcs12"
+
+    "git.wntrmute.dev/kyle/goutils/certlib/certerr"
+    "git.wntrmute.dev/kyle/goutils/certlib/pkcs7"
)

// OneYear is a time.Duration representing a year's worth of seconds.
@@ -65,10 +65,10 @@ const OneYear = 8760 * time.Hour
// OneDay is a time.Duration representing a day's worth of seconds.
const OneDay = 24 * time.Hour

-// DelegationUsage is the OID for the DelegationUseage extensions
+// DelegationUsage is the OID for the DelegationUseage extensions.
var DelegationUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 44363, 44}

-// DelegationExtension
+// DelegationExtension is a non-critical extension marking delegation usage.
var DelegationExtension = pkix.Extension{
    Id: DelegationUsage,
    Critical: false,
@@ -81,41 +81,51 @@ func InclusiveDate(year int, month time.Month, day int) time.Time {
    return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
}

+const (
+    year2012 = 2012
+    year2015 = 2015
+    day1 = 1
+)
+
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
// issuing certificates valid for more than 5 years.
-var Jul2012 = InclusiveDate(2012, time.July, 01)
+var Jul2012 = InclusiveDate(year2012, time.July, day1)

// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
// issuing certificates valid for more than 39 months.
-var Apr2015 = InclusiveDate(2015, time.April, 01)
+var Apr2015 = InclusiveDate(year2015, time.April, day1)

-// KeyLength returns the bit size of ECDSA or RSA PublicKey
-func KeyLength(key interface{}) int {
-    if key == nil {
-        return 0
-    }
-    if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
-        return ecdsaKey.Curve.Params().BitSize
-    } else if rsaKey, ok := key.(*rsa.PublicKey); ok {
-        return rsaKey.N.BitLen()
-    }
-
-    return 0
+// KeyLength returns the bit size of ECDSA or RSA PublicKey.
+func KeyLength(key any) int {
+    switch k := key.(type) {
+    case *ecdsa.PublicKey:
+        if k == nil {
+            return 0
+        }
+        return k.Curve.Params().BitSize
+    case *rsa.PublicKey:
+        if k == nil {
+            return 0
+        }
+        return k.N.BitLen()
+    default:
+        return 0
+    }
}

// ExpiryTime returns the time when the certificate chain is expired.
-func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
+func ExpiryTime(chain []*x509.Certificate) time.Time {
+    var notAfter time.Time
    if len(chain) == 0 {
-        return
+        return notAfter
    }

    notAfter = chain[0].NotAfter
    for _, cert := range chain {
        if notAfter.After(cert.NotAfter) {
            notAfter = cert.NotAfter
        }
    }
-    return
+    return notAfter
}

// MonthsValid returns the number of months for which a certificate is valid.
@@ -144,109 +154,109 @@ func ValidExpiry(c *x509.Certificate) bool {
        maxMonths = 39
    case issued.After(Jul2012):
        maxMonths = 60
-    case issued.Before(Jul2012):
+    default:
        maxMonths = 120
    }

-    if MonthsValid(c) > maxMonths {
-        return false
-    }
-    return true
+    return MonthsValid(c) <= maxMonths
+}
+
+// SignatureString returns the TLS signature string corresponding to
+// an X509 signature algorithm.
+var signatureString = map[x509.SignatureAlgorithm]string{
+    x509.UnknownSignatureAlgorithm: "Unknown Signature",
+    x509.MD2WithRSA: "MD2WithRSA",
+    x509.MD5WithRSA: "MD5WithRSA",
+    x509.SHA1WithRSA: "SHA1WithRSA",
+    x509.SHA256WithRSA: "SHA256WithRSA",
+    x509.SHA384WithRSA: "SHA384WithRSA",
+    x509.SHA512WithRSA: "SHA512WithRSA",
+    x509.SHA256WithRSAPSS: "SHA256WithRSAPSS",
+    x509.SHA384WithRSAPSS: "SHA384WithRSAPSS",
+    x509.SHA512WithRSAPSS: "SHA512WithRSAPSS",
+    x509.DSAWithSHA1: "DSAWithSHA1",
+    x509.DSAWithSHA256: "DSAWithSHA256",
+    x509.ECDSAWithSHA1: "ECDSAWithSHA1",
+    x509.ECDSAWithSHA256: "ECDSAWithSHA256",
+    x509.ECDSAWithSHA384: "ECDSAWithSHA384",
+    x509.ECDSAWithSHA512: "ECDSAWithSHA512",
+    x509.PureEd25519: "PureEd25519",
}

// SignatureString returns the TLS signature string corresponding to
// an X509 signature algorithm.
func SignatureString(alg x509.SignatureAlgorithm) string {
-    switch alg {
-    case x509.MD2WithRSA:
-        return "MD2WithRSA"
-    case x509.MD5WithRSA:
-        return "MD5WithRSA"
-    case x509.SHA1WithRSA:
-        return "SHA1WithRSA"
-    case x509.SHA256WithRSA:
-        return "SHA256WithRSA"
-    case x509.SHA384WithRSA:
-        return "SHA384WithRSA"
-    case x509.SHA512WithRSA:
-        return "SHA512WithRSA"
-    case x509.DSAWithSHA1:
-        return "DSAWithSHA1"
-    case x509.DSAWithSHA256:
-        return "DSAWithSHA256"
-    case x509.ECDSAWithSHA1:
-        return "ECDSAWithSHA1"
-    case x509.ECDSAWithSHA256:
-        return "ECDSAWithSHA256"
-    case x509.ECDSAWithSHA384:
-        return "ECDSAWithSHA384"
-    case x509.ECDSAWithSHA512:
-        return "ECDSAWithSHA512"
-    default:
-        return "Unknown Signature"
-    }
+    if s, ok := signatureString[alg]; ok {
+        return s
+    }
+    return "Unknown Signature"
+}
+
+// HashAlgoString returns the hash algorithm name contains in the signature
+// method.
+var hashAlgoString = map[x509.SignatureAlgorithm]string{
+    x509.UnknownSignatureAlgorithm: "Unknown Hash Algorithm",
+    x509.MD2WithRSA: "MD2",
+    x509.MD5WithRSA: "MD5",
+    x509.SHA1WithRSA: "SHA1",
+    x509.SHA256WithRSA: "SHA256",
+    x509.SHA384WithRSA: "SHA384",
+    x509.SHA512WithRSA: "SHA512",
+    x509.SHA256WithRSAPSS: "SHA256",
+    x509.SHA384WithRSAPSS: "SHA384",
+    x509.SHA512WithRSAPSS: "SHA512",
+    x509.DSAWithSHA1: "SHA1",
+    x509.DSAWithSHA256: "SHA256",
+    x509.ECDSAWithSHA1: "SHA1",
+    x509.ECDSAWithSHA256: "SHA256",
+    x509.ECDSAWithSHA384: "SHA384",
+    x509.ECDSAWithSHA512: "SHA512",
+    x509.PureEd25519: "SHA512", // per x509 docs Ed25519 uses SHA-512 internally
}

// HashAlgoString returns the hash algorithm name contains in the signature
// method.
func HashAlgoString(alg x509.SignatureAlgorithm) string {
-    switch alg {
-    case x509.MD2WithRSA:
-        return "MD2"
-    case x509.MD5WithRSA:
-        return "MD5"
-    case x509.SHA1WithRSA:
-        return "SHA1"
-    case x509.SHA256WithRSA:
-        return "SHA256"
-    case x509.SHA384WithRSA:
-        return "SHA384"
-    case x509.SHA512WithRSA:
-        return "SHA512"
-    case x509.DSAWithSHA1:
-        return "SHA1"
-    case x509.DSAWithSHA256:
-        return "SHA256"
-    case x509.ECDSAWithSHA1:
-        return "SHA1"
-    case x509.ECDSAWithSHA256:
-        return "SHA256"
-    case x509.ECDSAWithSHA384:
-        return "SHA384"
-    case x509.ECDSAWithSHA512:
-        return "SHA512"
-    default:
-        return "Unknown Hash Algorithm"
-    }
+    if s, ok := hashAlgoString[alg]; ok {
+        return s
+    }
+    return "Unknown Hash Algorithm"
}

// StringTLSVersion returns underlying enum values from human names for TLS
-// versions, defaults to current golang default of TLS 1.0
+// versions, defaults to current golang default of TLS 1.0.
func StringTLSVersion(version string) uint16 {
    switch version {
+    case "1.3":
+        return tls.VersionTLS13
    case "1.2":
        return tls.VersionTLS12
    case "1.1":
        return tls.VersionTLS11
+    case "1.0":
+        return tls.VersionTLS10
    default:
+        // Default to Go's historical default of TLS 1.0 for unknown values
        return tls.VersionTLS10
    }
}

-// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
+// EncodeCertificatesPEM encodes a number of x509 certificates to PEM.
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
    var buffer bytes.Buffer
    for _, cert := range certs {
-        pem.Encode(&buffer, &pem.Block{
+        if err := pem.Encode(&buffer, &pem.Block{
            Type: "CERTIFICATE",
            Bytes: cert.Raw,
-        })
+        }); err != nil {
+            return nil
+        }
    }

    return buffer.Bytes()
}

-// EncodeCertificatePEM encodes a single x509 certificates to PEM
+// EncodeCertificatePEM encodes a single x509 certificates to PEM.
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
    return EncodeCertificatesPEM([]*x509.Certificate{cert})
}
@@ -269,38 +279,52 @@ func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
        certs = append(certs, cert...)
    }
    if len(certsPEM) > 0 {
-        return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("trailing data at end of certificate"))
+        return nil, certerr.DecodeError(
+            certerr.ErrorSourceCertificate,
+            errors.New("trailing data at end of certificate"),
+        )
    }
    return certs, nil
}

// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
// either PKCS #7, PKCS #12, or raw x509.
-func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
+func ParseCertificatesDER(certsDER []byte, password string) ([]*x509.Certificate, crypto.Signer, error) {
    certsDER = bytes.TrimSpace(certsDER)
-    pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
-    if err != nil {
-        var pkcs12data interface{}
-        certs = make([]*x509.Certificate, 1)
-        pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
-        if err != nil {
-            certs, err = x509.ParseCertificates(certsDER)
-            if err != nil {
-                return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
-            }
-        } else {
-            key = pkcs12data.(crypto.Signer)
-        }
-    } else {
-        if pkcs7data.ContentInfo != "SignedData" {
-            return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("can only extract certificates from signed data content info"))
-        }
-        certs = pkcs7data.Content.SignedData.Certificates
-    }
-    if certs == nil {
-        return nil, key, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
-    }
-    return certs, key, nil
+
+    // First, try PKCS #7
+    if pkcs7data, err7 := pkcs7.ParsePKCS7(certsDER); err7 == nil {
+        if pkcs7data.ContentInfo != "SignedData" {
+            return nil, nil, certerr.DecodeError(
+                certerr.ErrorSourceCertificate,
+                errors.New("can only extract certificates from signed data content info"),
+            )
+        }
+        certs := pkcs7data.Content.SignedData.Certificates
+        if certs == nil {
+            return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificates decoded"))
+        }
+        return certs, nil, nil
+    }
+
+    // Next, try PKCS #12
+    if pkcs12data, cert, err12 := pkcs12.Decode(certsDER, password); err12 == nil {
+        signer, ok := pkcs12data.(crypto.Signer)
+        if !ok {
+            return nil, nil, certerr.DecodeError(
+                certerr.ErrorSourcePrivateKey,
+                errors.New("PKCS12 data does not contain a private key"),
+            )
+        }
+        return []*x509.Certificate{cert}, signer, nil
+    }
+
+    // Finally, attempt to parse raw X.509 certificates
+    certs, err := x509.ParseCertificates(certsDER)
+    if err != nil {
+        return nil, nil, certerr.DecodeError(certerr.ErrorSourceCertificate, err)
+    }
+    return certs, nil, nil
}

// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and check if it is self-signed.
@@ -310,7 +334,8 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
        return nil, err
    }

-    if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
+    err = cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature)
+    if err != nil {
        return nil, certerr.VerifyError(certerr.ErrorSourceCertificate, err)
    }
    return cert, nil
@@ -320,17 +345,26 @@ func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
// can handle PEM encoded PKCS #7 structures.
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
    certPEM = bytes.TrimSpace(certPEM)
-    cert, rest, err := ParseOneCertificateFromPEM(certPEM)
+    certs, rest, err := ParseOneCertificateFromPEM(certPEM)
    if err != nil {
        return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
-    } else if cert == nil {
-        return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
-    } else if len(rest) > 0 {
-        return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PEM file should contain only one object"))
-    } else if len(cert) > 1 {
-        return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
    }
-    return cert[0], nil
+    if certs == nil {
+        return nil, certerr.DecodeError(certerr.ErrorSourceCertificate, errors.New("no certificate decoded"))
+    }
+    if len(rest) > 0 {
+        return nil, certerr.ParsingError(
+            certerr.ErrorSourceCertificate,
+            errors.New("the PEM file should contain only one object"),
+        )
+    }
+    if len(certs) > 1 {
+        return nil, certerr.ParsingError(
+            certerr.ErrorSourceCertificate,
+            errors.New("the PKCS7 object in the PEM file should contain only one certificate"),
+        )
+    }
+    return certs[0], nil
}

// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
@@ -338,7 +372,6 @@ func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
// multiple certificates, from the top of certsPEM, which itself may
// contain multiple PEM encoded certificate objects.
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
-
    block, rest := pem.Decode(certsPEM)
    if block == nil {
        return nil, rest, nil
@@ -346,8 +379,8 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e

    cert, err := x509.ParseCertificate(block.Bytes)
    if err != nil {
-        pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
-        if err != nil {
+        pkcs7data, err2 := pkcs7.ParsePKCS7(block.Bytes)
+        if err2 != nil {
            return nil, rest, err
        }
        if pkcs7data.ContentInfo != "SignedData" {
@@ -363,10 +396,49 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
        return certs, rest, nil
    }

+// LoadFullCertPool returns a certificate pool with roots and intermediates
+// from disk. If no roots are provided, the system root pool will be used.
+func LoadFullCertPool(roots, intermediates string) (*x509.CertPool, error) {
+    var err error
+
+    pool := x509.NewCertPool()
+
+    if roots == "" {
+        pool, err = x509.SystemCertPool()
+        if err != nil {
+            return nil, fmt.Errorf("loading system cert pool: %w", err)
+        }
+    } else {
+        var rootCerts []*x509.Certificate
+        rootCerts, err = LoadCertificates(roots)
+        if err != nil {
+            return nil, fmt.Errorf("loading roots: %w", err)
+        }
+
+        for _, cert := range rootCerts {
+            pool.AddCert(cert)
+        }
+    }
+
+    if intermediates != "" {
+        var intCerts []*x509.Certificate
+        intCerts, err = LoadCertificates(intermediates)
+        if err != nil {
+            return nil, fmt.Errorf("loading intermediates: %w", err)
+        }
+
+        for _, cert := range intCerts {
+            pool.AddCert(cert)
+        }
+    }
+
+    return pool, nil
+}
+
// LoadPEMCertPool loads a pool of PEM certificates from file.
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
    if certsFile == "" {
-        return nil, nil
+        return nil, nil //nolint:nilnil // no CA file provided -> treat as no pool and no error
    }
    pemCerts, err := os.ReadFile(certsFile)
    if err != nil {
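A sketch (not part of this changeset) of the new LoadFullCertPool helper, assuming it is exported from the certlib package; the file path is a placeholder, and an empty roots argument falls back to the system pool as documented above.

package main

import (
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    // Start from the system roots and add intermediates from disk.
    pool, err := certlib.LoadFullCertPool("", "intermediates.pem") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    _ = pool // hand the pool to a tls.Config or x509.VerifyOptions
}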
@@ -379,12 +451,12 @@ func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
// PEMToCertPool concerts PEM certificates to a CertPool.
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
    if len(pemCerts) == 0 {
-        return nil, nil
+        return nil, nil //nolint:nilnil // empty input means no pool needed
    }

    certPool := x509.NewCertPool()
    if !certPool.AppendCertsFromPEM(pemCerts) {
-        return nil, errors.New("failed to load cert pool")
+        return nil, certerr.LoadingError(certerr.ErrorSourceCertificate, errors.New("failed to load cert pool"))
    }

    return certPool, nil
@@ -393,14 +465,14 @@ func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
// or elliptic private key.
-func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
+func ParsePrivateKeyPEM(keyPEM []byte) (crypto.Signer, error) {
    return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
}

// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
// or elliptic private key.
-func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
+func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (crypto.Signer, error) {
    keyDER, err := GetKeyDERFromPEM(keyPEM, password)
    if err != nil {
        return nil, err
@@ -420,44 +492,47 @@ func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
            break
        }
    }
-    if keyDER != nil {
-        if procType, ok := keyDER.Headers["Proc-Type"]; ok {
-            if strings.Contains(procType, "ENCRYPTED") {
-                if password != nil {
-                    return x509.DecryptPEMBlock(keyDER, password)
-                }
-                return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
-            }
-        }
-        return keyDER.Bytes, nil
-    }
-
-    return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
+    if keyDER == nil {
+        return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, errors.New("failed to decode private key"))
+    }
+    if procType, ok := keyDER.Headers["Proc-Type"]; ok && strings.Contains(procType, "ENCRYPTED") {
+        if password != nil {
+            return x509.DecryptPEMBlock(keyDER, password)
+        }
+        return nil, certerr.DecodeError(certerr.ErrorSourcePrivateKey, certerr.ErrEncryptedPrivateKey)
+    }
+    return keyDER.Bytes, nil
}

// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
-func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
+func ParseCSR(in []byte) (*x509.CertificateRequest, []byte, error) {
    in = bytes.TrimSpace(in)
    p, rest := pem.Decode(in)
-    if p != nil {
-        if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
-            return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"))
-        }
-
-        csr, err = x509.ParseCertificateRequest(p.Bytes)
-    } else {
-        csr, err = x509.ParseCertificateRequest(in)
-    }
-    if err != nil {
-        return nil, rest, err
-    }
-
-    err = csr.CheckSignature()
-    if err != nil {
-        return nil, rest, err
-    }
-
+    if p == nil {
+        csr, err := x509.ParseCertificateRequest(in)
+        if err != nil {
+            return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
+        }
+        if sigErr := csr.CheckSignature(); sigErr != nil {
+            return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
+        }
+        return csr, rest, nil
+    }
+
+    if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
+        return nil, rest, certerr.ParsingError(
+            certerr.ErrorSourceCSR,
+            certerr.ErrInvalidPEMType(p.Type, "NEW CERTIFICATE REQUEST", "CERTIFICATE REQUEST"),
+        )
+    }

+    csr, err := x509.ParseCertificateRequest(p.Bytes)
+    if err != nil {
+        return nil, rest, certerr.ParsingError(certerr.ErrorSourceCSR, err)
+    }
+    if sigErr := csr.CheckSignature(); sigErr != nil {
+        return nil, rest, certerr.VerifyError(certerr.ErrorSourceCSR, sigErr)
+    }

    return csr, rest, nil
}

@@ -465,14 +540,14 @@ func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error)
// It does not check the signature. This is useful for dumping data from a CSR
// locally.
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
-    block, _ := pem.Decode([]byte(csrPEM))
+    block, _ := pem.Decode(csrPEM)
    if block == nil {
        return nil, certerr.DecodeError(certerr.ErrorSourceCSR, errors.New("PEM block is empty"))
    }
    csrObject, err := x509.ParseCertificateRequest(block.Bytes)

    if err != nil {
-        return nil, err
+        return nil, certerr.ParsingError(certerr.ErrorSourceCSR, err)
    }

    return csrObject, nil
@@ -480,15 +555,20 @@ func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {

// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
+    const (
+        rsaBits2048 = 2048
+        rsaBits3072 = 3072
+        rsaBits4096 = 4096
+    )
    switch pub := priv.Public().(type) {
    case *rsa.PublicKey:
        bitLength := pub.N.BitLen()
        switch {
-        case bitLength >= 4096:
+        case bitLength >= rsaBits4096:
            return x509.SHA512WithRSA
-        case bitLength >= 3072:
+        case bitLength >= rsaBits3072:
            return x509.SHA384WithRSA
-        case bitLength >= 2048:
+        case bitLength >= rsaBits2048:
            return x509.SHA256WithRSA
        default:
            return x509.SHA1WithRSA
@@ -509,7 +589,7 @@ func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
    }
}

-// LoadClientCertificate load key/certificate from pem files
+// LoadClientCertificate load key/certificate from pem files.
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
    if certFile != "" && keyFile != "" {
        cert, err := tls.LoadX509KeyPair(certFile, keyFile)
@@ -518,10 +598,10 @@ func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, e
        }
        return &cert, nil
    }
-    return nil, nil
+    return nil, nil //nolint:nilnil // absence of client cert is not an error
}

-// CreateTLSConfig creates a tls.Config object from certs and roots
+// CreateTLSConfig creates a tls.Config object from certs and roots.
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
    var certs []tls.Certificate
    if cert != nil {
@@ -530,6 +610,7 @@ func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Confi
    return &tls.Config{
        Certificates: certs,
        RootCAs: remoteCAs,
+        MinVersion: tls.VersionTLS12, // secure default
    }
}

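A sketch (not part of this changeset) showing how the helpers above might be combined into a TLS client, assuming the certlib package; the host and file names are placeholders. The MinVersion added in this hunk means the resulting config refuses anything below TLS 1.2.

package main

import (
    "crypto/tls"
    "log"

    "git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
    pool, err := certlib.LoadPEMCertPool("roots.pem") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    clientCert, err := certlib.LoadClientCertificate("client.pem", "client-key.pem") // placeholders
    if err != nil {
        log.Fatal(err)
    }
    cfg := certlib.CreateTLSConfig(pool, clientCert)
    conn, err := tls.Dial("tcp", "example.com:443", cfg) // placeholder host
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}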
@@ -554,18 +635,24 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if len(rest) != 0 {
|
if len(rest) != 0 {
|
||||||
return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
|
return nil, certerr.ParsingError(
|
||||||
|
certerr.ErrorSourceSCTList,
|
||||||
|
errors.New("serialized SCT list contained trailing garbage"),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
|
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
|
||||||
for i, serializedSCT := range sctList.SCTList {
|
for i, serializedSCT := range sctList.SCTList {
|
||||||
var sct ct.SignedCertificateTimestamp
|
var sct ct.SignedCertificateTimestamp
|
||||||
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
|
+		rest2, err2 := cttls.Unmarshal(serializedSCT.Val, &sct)
-		if err != nil {
-			return nil, err
+		if err2 != nil {
+			return nil, err2
 		}
-		if len(rest) != 0 {
-			return nil, certerr.ParsingError(certerr.ErrorSourceSCTList, errors.New("serialized SCT list contained trailing garbage"))
+		if len(rest2) != 0 {
+			return nil, certerr.ParsingError(
+				certerr.ErrorSourceSCTList,
+				errors.New("serialized SCT list contained trailing garbage"),
+			)
 		}
 		list[i] = sct
 	}
@@ -577,12 +664,12 @@ func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta
 // unmarshalled.
 func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
 	// This loop finds the SCTListExtension in the OCSP response.
-	var SCTListExtension, ext pkix.Extension
+	var sctListExtension, ext pkix.Extension
 	for _, ext = range response.Extensions {
 		// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
 		sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
 		if ext.Id.Equal(sctExtOid) {
-			SCTListExtension = ext
+			sctListExtension = ext
 			break
 		}
 	}
@@ -590,10 +677,10 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
 	// This code block extracts the sctList from the SCT extension.
 	var sctList []ct.SignedCertificateTimestamp
 	var err error
-	if numBytes := len(SCTListExtension.Value); numBytes != 0 {
+	if numBytes := len(sctListExtension.Value); numBytes != 0 {
 		var serializedSCTList []byte
 		rest := make([]byte, numBytes)
-		copy(rest, SCTListExtension.Value)
+		copy(rest, sctListExtension.Value)
 		for len(rest) != 0 {
 			rest, err = asn1.Unmarshal(rest, &serializedSCTList)
 			if err != nil {
@@ -611,20 +698,16 @@ func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTim
 // the subsequent file. If no prefix is provided, valFile is assumed to be a
 // file path.
 func ReadBytes(valFile string) ([]byte, error) {
-	switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) {
-	case 1:
+	prefix, rest, found := strings.Cut(valFile, ":")
+	if !found {
 		return os.ReadFile(valFile)
-	case 2:
-		switch splitVal[0] {
+	}
+	switch prefix {
 	case "env":
-		return []byte(os.Getenv(splitVal[1])), nil
+		return []byte(os.Getenv(rest)), nil
 	case "file":
-		return os.ReadFile(splitVal[1])
-	default:
-		return nil, fmt.Errorf("unknown prefix: %s", splitVal[0])
-	}
+		return os.ReadFile(rest)
 	default:
-		return nil, fmt.Errorf("multiple prefixes: %s",
-			strings.Join(splitVal[:len(splitVal)-1], ", "))
+		return nil, fmt.Errorf("unknown prefix: %s", prefix)
 	}
 }
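For reference, the rewritten ReadBytes keeps the original prefix convention: a bare argument is read as a file path, env:NAME reads an environment variable, and file:PATH forces a file read. A minimal caller sketch follows; the certlib import path is an assumption about where ReadBytes lives in this repository.

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib" // assumed home of ReadBytes
)

func main() {
	// Reads the value of the CA_PASSWORD environment variable instead of a file.
	secret, err := certlib.ReadBytes("env:CA_PASSWORD")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded %d bytes\n", len(secret))
}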
@@ -9,6 +9,8 @@ import (
 	"strings"
 )
 
+const defaultHTTPSPort = 443
+
 type Target struct {
 	Host string
 	Port int
@@ -24,45 +26,50 @@ func parseURL(host string) (string, int, error) {
 		return "", 0, fmt.Errorf("certlib/hosts: invalid host: %s", host)
 	}
 
-	if strings.ToLower(url.Scheme) != "https" {
+	switch strings.ToLower(url.Scheme) {
+	case "https":
+		// OK
+	case "tls":
+		// OK
+	default:
 		return "", 0, errors.New("certlib/hosts: only https scheme supported")
 	}
 
 	if url.Port() == "" {
-		return url.Hostname(), 443, nil
+		return url.Hostname(), defaultHTTPSPort, nil
 	}
 
-	port, err := strconv.ParseInt(url.Port(), 10, 16)
-	if err != nil {
+	portInt, err2 := strconv.ParseInt(url.Port(), 10, 16)
+	if err2 != nil {
 		return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", url.Port())
 	}
 
-	return url.Hostname(), int(port), nil
+	return url.Hostname(), int(portInt), nil
 }
 
 func parseHostPort(host string) (string, int, error) {
-	host, sport, err := net.SplitHostPort(host)
+	shost, sport, err := net.SplitHostPort(host)
 	if err == nil {
-		port, err := strconv.ParseInt(sport, 10, 16)
-		if err != nil {
+		portInt, err2 := strconv.ParseInt(sport, 10, 16)
+		if err2 != nil {
 			return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", sport)
 		}
 
-		return host, int(port), nil
+		return shost, int(portInt), nil
 	}
 
-	return host, 443, nil
+	return host, defaultHTTPSPort, nil
 }
 
 func ParseHost(host string) (*Target, error) {
-	host, port, err := parseURL(host)
+	uhost, port, err := parseURL(host)
 	if err == nil {
-		return &Target{Host: host, Port: port}, nil
+		return &Target{Host: uhost, Port: port}, nil
 	}
 
-	host, port, err = parseHostPort(host)
+	shost, port, err := parseHostPort(host)
 	if err == nil {
-		return &Target{Host: host, Port: port}, nil
+		return &Target{Host: shost, Port: port}, nil
 	}
 
 	return nil, fmt.Errorf("certlib/hosts: invalid host: %s", host)
35	certlib/hosts/hosts_test.go	Normal file
@@ -0,0 +1,35 @@
+package hosts_test
+
+import (
+	"testing"
+
+	"git.wntrmute.dev/kyle/goutils/certlib/hosts"
+)
+
+type testCase struct {
+	Host   string
+	Target hosts.Target
+}
+
+var testCases = []testCase{
+	{Host: "server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
+	{Host: "server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
+	{Host: "tls://server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
+	{Host: "https://server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
+	{Host: "https://server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
+	{Host: "tls://server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
+	{Host: "https://server-name/something/else", Target: hosts.Target{Host: "server-name", Port: 443}},
+}
+
+func TestParseHost(t *testing.T) {
+	for i, tc := range testCases {
+		target, err := hosts.ParseHost(tc.Host)
+		if err != nil {
+			t.Fatalf("test case %d: %s", i+1, err)
+		}
+
+		if target.Host != tc.Target.Host {
+			t.Fatalf("test case %d: got host '%s', want host '%s'", i+1, target.Host, tc.Target.Host)
+		}
+	}
+}
135	certlib/keymatch.go	Normal file
@@ -0,0 +1,135 @@
+package certlib
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"os"
+)
+
+// LoadPrivateKey loads a private key from disk. It accepts both PEM and DER
+// encodings and supports RSA and ECDSA keys. If the file contains a PEM block,
+// the block type must be one of the recognised private key types.
+func LoadPrivateKey(path string) (crypto.Signer, error) {
+	in, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	in = bytes.TrimSpace(in)
+	if p, _ := pem.Decode(in); p != nil {
+		if !validPEMs[p.Type] {
+			return nil, errors.New("invalid private key file type " + p.Type)
+		}
+		return ParsePrivateKeyPEM(in)
+	}
+
+	return ParsePrivateKeyDER(in)
+}
+
+var validPEMs = map[string]bool{
+	"PRIVATE KEY":     true,
+	"RSA PRIVATE KEY": true,
+	"EC PRIVATE KEY":  true,
+}
+
+const (
+	curveInvalid = iota // any invalid curve
+	curveRSA            // indicates key is an RSA key, not an EC key
+	curveP256
+	curveP384
+	curveP521
+)
+
+func getECCurve(pub any) int {
+	switch pub := pub.(type) {
+	case *ecdsa.PublicKey:
+		switch pub.Curve {
+		case elliptic.P256():
+			return curveP256
+		case elliptic.P384():
+			return curveP384
+		case elliptic.P521():
+			return curveP521
+		default:
+			return curveInvalid
+		}
+	case *rsa.PublicKey:
+		return curveRSA
+	default:
+		return curveInvalid
+	}
+}
+
+// matchRSA compares an RSA public key from certificate against RSA public key from private key.
+// It returns true on match.
+func matchRSA(certPub *rsa.PublicKey, keyPub *rsa.PublicKey) bool {
+	return keyPub.N.Cmp(certPub.N) == 0 && keyPub.E == certPub.E
+}
+
+// matchECDSA compares ECDSA public keys for equality and compatible curve.
+// It returns match=true when they are on the same curve and have the same X/Y.
+// If curves mismatch, match is false.
+func matchECDSA(certPub *ecdsa.PublicKey, keyPub *ecdsa.PublicKey) bool {
+	if getECCurve(certPub) != getECCurve(keyPub) {
+		return false
+	}
+	if keyPub.X.Cmp(certPub.X) != 0 {
+		return false
+	}
+	if keyPub.Y.Cmp(certPub.Y) != 0 {
+		return false
+	}
+	return true
+}
+
+// MatchKeys determines whether the certificate's public key matches the given private key.
+// It returns true if they match; otherwise, it returns false and a human-friendly reason.
+func MatchKeys(cert *x509.Certificate, priv crypto.Signer) (bool, string) {
+	switch keyPub := priv.Public().(type) {
+	case *rsa.PublicKey:
+		switch certPub := cert.PublicKey.(type) {
+		case *rsa.PublicKey:
+			if matchRSA(certPub, keyPub) {
+				return true, ""
+			}
+			return false, "public keys don't match"
+		case *ecdsa.PublicKey:
+			return false, "RSA private key, EC public key"
+		default:
+			return false, fmt.Sprintf("unsupported certificate public key type: %T", cert.PublicKey)
+		}
+	case *ecdsa.PublicKey:
+		switch certPub := cert.PublicKey.(type) {
+		case *ecdsa.PublicKey:
+			if matchECDSA(certPub, keyPub) {
+				return true, ""
+			}
+			// Determine a more precise reason
+			kc := getECCurve(keyPub)
+			cc := getECCurve(certPub)
+			if kc == curveInvalid {
+				return false, "invalid private key curve"
+			}
+			if cc == curveRSA {
+				return false, "private key is EC, certificate is RSA"
+			}
+			if kc != cc {
+				return false, "EC curves don't match"
+			}
+			return false, "public keys don't match"
+		case *rsa.PublicKey:
+			return false, "private key is EC, certificate is RSA"
+		default:
+			return false, fmt.Sprintf("unsupported certificate public key type: %T", cert.PublicKey)
+		}
+	default:
+		return false, fmt.Sprintf("unrecognised private key type: %T", priv.Public())
+	}
+}
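A short sketch of how the new LoadPrivateKey and MatchKeys helpers compose; the file names are placeholders and the leaf certificate is parsed with the standard library.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	priv, err := certlib.LoadPrivateKey("server.key") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	pemData, err := os.ReadFile("server.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemData)
	if block == nil {
		log.Fatal("no PEM block found in server.pem")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}

	if ok, reason := certlib.MatchKeys(cert, priv); !ok {
		log.Fatalf("key does not match certificate: %s", reason)
	}
	fmt.Println("certificate and private key match")
}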
@@ -93,7 +93,7 @@ type signedData struct {
 	Version int
 	DigestAlgorithms asn1.RawValue
 	ContentInfo asn1.RawValue
-	Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
+	Certificates asn1.RawValue `asn1:"optional"`
 	Crls asn1.RawValue `asn1:"optional"`
 	SignerInfos asn1.RawValue
 }
@@ -158,9 +158,9 @@ type EncryptedContentInfo struct {
 	EncryptedContent []byte `asn1:"tag:0,optional"`
 }
 
-func unmarshalInit(raw []byte) (init initPKCS7, err error) {
-	_, err = asn1.Unmarshal(raw, &init)
-	if err != nil {
+func unmarshalInit(raw []byte) (initPKCS7, error) {
+	var init initPKCS7
+	if _, err := asn1.Unmarshal(raw, &init); err != nil {
 		return initPKCS7{}, certerr.ParsingError(certerr.ErrorSourceCertificate, err)
 	}
 	return init, nil
@@ -207,7 +207,10 @@ func populateEncryptedData(msg *PKCS7, contentBytes []byte) error {
 		return certerr.ParsingError(certerr.ErrorSourceCertificate, err)
 	}
 	if ed.Version != 0 {
-		return certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS #7 encryptedData version 0 is supported"))
+		return certerr.ParsingError(
+			certerr.ErrorSourceCertificate,
+			errors.New("only PKCS #7 encryptedData version 0 is supported"),
+		)
 	}
 	msg.Content.EncryptedData = ed
 	return nil
@@ -215,34 +218,35 @@ func populateEncryptedData(msg *PKCS7, contentBytes []byte) error {
 
 // ParsePKCS7 attempts to parse the DER encoded bytes of a
 // PKCS7 structure.
-func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
-
+func ParsePKCS7(raw []byte) (*PKCS7, error) {
 	pkcs7, err := unmarshalInit(raw)
 	if err != nil {
 		return nil, err
 	}
 
-	msg = new(PKCS7)
+	msg := new(PKCS7)
 	msg.Raw = pkcs7.Raw
 	msg.ContentInfo = pkcs7.ContentType.String()
 
 	switch msg.ContentInfo {
 	case ObjIDData:
-		if err := populateData(msg, pkcs7.Content); err != nil {
-			return nil, err
+		if e := populateData(msg, pkcs7.Content); e != nil {
+			return nil, e
 		}
 	case ObjIDSignedData:
-		if err := populateSignedData(msg, pkcs7.Content.Bytes); err != nil {
-			return nil, err
+		if e := populateSignedData(msg, pkcs7.Content.Bytes); e != nil {
+			return nil, e
 		}
 	case ObjIDEncryptedData:
-		if err := populateEncryptedData(msg, pkcs7.Content.Bytes); err != nil {
-			return nil, err
+		if e := populateEncryptedData(msg, pkcs7.Content.Bytes); e != nil {
+			return nil, e
 		}
 	default:
-		return nil, certerr.ParsingError(certerr.ErrorSourceCertificate, errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"))
+		return nil, certerr.ParsingError(
+			certerr.ErrorSourceCertificate,
+			errors.New("only PKCS# 7 content of type data, signed data or encrypted data can be parsed"),
+		)
 	}
 
 	return msg, nil
-
 }
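The reworked ParsePKCS7 still dispatches on the outer content type. A minimal caller sketch follows; the package import path and the input file name are assumptions, not taken from this diff.

package main

import (
	"fmt"
	"log"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/pkcs7" // assumed import path for this package
)

func main() {
	der, err := os.ReadFile("bundle.p7b") // placeholder: DER-encoded PKCS #7 blob
	if err != nil {
		log.Fatal(err)
	}

	msg, err := pkcs7.ParsePKCS7(der)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("content type:", msg.ContentInfo)
}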
@@ -5,6 +5,7 @@ package revoke
 
 import (
 	"bytes"
+	"context"
 	"crypto"
 	"crypto/x509"
 	"encoding/base64"
@@ -89,35 +90,35 @@ func ldapURL(url string) bool {
 // - false, false: an error was encountered while checking revocations.
 // - false, true: the certificate was checked successfully, and it is not revoked.
 // - true, true: the certificate was checked successfully, and it is revoked.
-// - true, false: failure to check revocation status causes verification to fail
-func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
+// - true, false: failure to check revocation status causes verification to fail.
+func revCheck(cert *x509.Certificate) (bool, bool, error) {
 	for _, url := range cert.CRLDistributionPoints {
 		if ldapURL(url) {
 			log.Infof("skipping LDAP CRL: %s", url)
 			continue
 		}
 
-		if revoked, ok, err := certIsRevokedCRL(cert, url); !ok {
+		if rvk, ok2, err2 := certIsRevokedCRL(cert, url); !ok2 {
 			log.Warning("error checking revocation via CRL")
 			if HardFail {
-				return true, false, err
+				return true, false, err2
 			}
-			return false, false, err
-		} else if revoked {
+			return false, false, err2
+		} else if rvk {
 			log.Info("certificate is revoked via CRL")
-			return true, true, err
+			return true, true, err2
 		}
 	}
 
-	if revoked, ok, err := certIsRevokedOCSP(cert, HardFail); !ok {
+	if rvk, ok2, err2 := certIsRevokedOCSP(cert, HardFail); !ok2 {
 		log.Warning("error checking revocation via OCSP")
 		if HardFail {
-			return true, false, err
+			return true, false, err2
 		}
-		return false, false, err
-	} else if revoked {
+		return false, false, err2
+	} else if rvk {
 		log.Info("certificate is revoked via OCSP")
-		return true, true, err
+		return true, true, err2
 	}
 
 	return false, true, nil
@@ -125,13 +126,17 @@ func revCheck(cert *x509.Certificate) (revoked, ok bool, err error) {
 
 // fetchCRL fetches and parses a CRL.
 func fetchCRL(url string) (*x509.RevocationList, error) {
-	resp, err := HTTPClient.Get(url)
+	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := HTTPClient.Do(req)
 	if err != nil {
 		return nil, err
 	}
 	defer resp.Body.Close()
 
-	if resp.StatusCode >= 300 {
+	if resp.StatusCode >= http.StatusMultipleChoices {
 		return nil, errors.New("failed to retrieve CRL")
 	}
 
@@ -154,12 +159,11 @@ func getIssuer(cert *x509.Certificate) *x509.Certificate {
 	}
 
 	return issuer
-
 }
 
 // check a cert against a specific CRL. Returns the same bool pair
 // as revCheck, plus an error if one occurred.
-func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err error) {
+func certIsRevokedCRL(cert *x509.Certificate, url string) (bool, bool, error) {
 	crlLock.Lock()
 	crl, ok := CRLSet[url]
 	if ok && crl == nil {
@@ -187,10 +191,9 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
 
 	// check CRL signature
 	if issuer != nil {
-		err = crl.CheckSignatureFrom(issuer)
-		if err != nil {
-			log.Warningf("failed to verify CRL: %v", err)
-			return false, false, err
+		if sigErr := crl.CheckSignatureFrom(issuer); sigErr != nil {
+			log.Warningf("failed to verify CRL: %v", sigErr)
+			return false, false, sigErr
 		}
 	}
 
@@ -199,40 +202,44 @@ func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool, err
 		crlLock.Unlock()
 	}
 
-	for _, revoked := range crl.RevokedCertificates {
-		if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
+	for _, entry := range crl.RevokedCertificateEntries {
+		if cert.SerialNumber.Cmp(entry.SerialNumber) == 0 {
 			log.Info("Serial number match: intermediate is revoked.")
-			return true, true, err
+			return true, true, nil
 		}
 	}
 
-	return false, true, err
+	return false, true, nil
 }
 
 // VerifyCertificate ensures that the certificate passed in hasn't
 // expired and checks the CRL for the server.
-func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
-	revoked, ok, _ = VerifyCertificateError(cert)
+func VerifyCertificate(cert *x509.Certificate) (bool, bool) {
+	revoked, ok, _ := VerifyCertificateError(cert)
 	return revoked, ok
 }
 
 // VerifyCertificateError ensures that the certificate passed in hasn't
 // expired and checks the CRL for the server.
-func VerifyCertificateError(cert *x509.Certificate) (revoked, ok bool, err error) {
+func VerifyCertificateError(cert *x509.Certificate) (bool, bool, error) {
 	if !time.Now().Before(cert.NotAfter) {
 		msg := fmt.Sprintf("Certificate expired %s\n", cert.NotAfter)
 		log.Info(msg)
 		return true, true, errors.New(msg)
 	} else if !time.Now().After(cert.NotBefore) {
 		msg := fmt.Sprintf("Certificate isn't valid until %s\n", cert.NotBefore)
 		log.Info(msg)
 		return true, true, errors.New(msg)
 	}
 	return revCheck(cert)
 }
 
 func fetchRemote(url string) (*x509.Certificate, error) {
-	resp, err := HTTPClient.Get(url)
+	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := HTTPClient.Do(req)
 	if err != nil {
 		return nil, err
 	}
@@ -255,8 +262,12 @@ var ocspOpts = ocsp.RequestOptions{
 	Hash: crypto.SHA1,
 }
 
-func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e error) {
-	var err error
+const ocspGetURLMaxLen = 256
+
+func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (bool, bool, error) {
+	var revoked bool
+	var ok bool
+	var lastErr error
 
 	ocspURLs := leaf.OCSPServer
 	if len(ocspURLs) == 0 {
@@ -272,15 +283,16 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
 
 	ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
 	if err != nil {
-		return revoked, ok, err
+		return false, false, err
 	}
 
 	for _, server := range ocspURLs {
-		resp, err := sendOCSPRequest(server, ocspRequest, leaf, issuer)
-		if err != nil {
+		resp, e := sendOCSPRequest(server, ocspRequest, leaf, issuer)
+		if e != nil {
 			if strict {
-				return revoked, ok, err
+				return false, false, e
 			}
+			lastErr = e
 			continue
 		}
 
@@ -292,9 +304,9 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
 			revoked = true
 		}
 
-		return revoked, ok, err
+		return revoked, ok, nil
 	}
-	return revoked, ok, err
+	return revoked, ok, lastErr
 }
 
 // sendOCSPRequest attempts to request an OCSP response from the
@@ -303,12 +315,21 @@ func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool, e
 func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate) (*ocsp.Response, error) {
 	var resp *http.Response
 	var err error
-	if len(req) > 256 {
+	if len(req) > ocspGetURLMaxLen {
 		buf := bytes.NewBuffer(req)
-		resp, err = HTTPClient.Post(server, "application/ocsp-request", buf)
+		httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodPost, server, buf)
+		if e != nil {
+			return nil, e
+		}
+		httpReq.Header.Set("Content-Type", "application/ocsp-request")
+		resp, err = HTTPClient.Do(httpReq)
 	} else {
 		reqURL := server + "/" + neturl.QueryEscape(base64.StdEncoding.EncodeToString(req))
-		resp, err = HTTPClient.Get(reqURL)
+		httpReq, e := http.NewRequestWithContext(context.Background(), http.MethodGet, reqURL, nil)
+		if e != nil {
+			return nil, e
+		}
+		resp, err = HTTPClient.Do(httpReq)
 	}
 
 	if err != nil {
@@ -343,21 +364,21 @@ func sendOCSPRequest(server string, req []byte, leaf, issuer *x509.Certificate)
 
 var crlRead = io.ReadAll
 
-// SetCRLFetcher sets the function to use to read from the http response body
+// SetCRLFetcher sets the function to use to read from the http response body.
 func SetCRLFetcher(fn func(io.Reader) ([]byte, error)) {
 	crlRead = fn
 }
 
 var remoteRead = io.ReadAll
 
-// SetRemoteFetcher sets the function to use to read from the http response body
+// SetRemoteFetcher sets the function to use to read from the http response body.
 func SetRemoteFetcher(fn func(io.Reader) ([]byte, error)) {
 	remoteRead = fn
 }
 
 var ocspRead = io.ReadAll
 
-// SetOCSPFetcher sets the function to use to read from the http response body
+// SetOCSPFetcher sets the function to use to read from the http response body.
 func SetOCSPFetcher(fn func(io.Reader) ([]byte, error)) {
 	ocspRead = fn
 }
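The refactored revocation helpers above are reached through the exported VerifyCertificate / VerifyCertificateError entry points. A minimal caller sketch, with certificate loading done via the standard library and a placeholder file name:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/revoke"
)

func main() {
	data, err := os.ReadFile("leaf.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM data found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}

	revoked, ok, err := revoke.VerifyCertificateError(cert)
	if !ok {
		log.Fatalf("revocation status could not be determined: %v", err)
	}
	fmt.Printf("revoked: %v\n", revoked)
}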
@@ -1,3 +1,4 @@
+//nolint:testpackage // keep tests in the same package for internal symbol access
 package revoke
 
 import (
@@ -50,7 +51,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // to indicate that this is the case.
 
 // 2014/05/22 14:18:17 Certificate expired 2014-04-04 14:14:20 +0000 UTC
-// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt
+// 2014/05/22 14:18:17 Revoked certificate: misc/intermediate_ca/ActalisServerAuthenticationCA.crt.
 var expiredCert = mustParse(`-----BEGIN CERTIFICATE-----
 MIIEXTCCA8agAwIBAgIEBycURTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV
 UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
@@ -80,7 +81,7 @@ sESPRwHkcMUNdAp37FLweUw=
 
 // 2014/05/22 14:18:31 Serial number match: intermediate is revoked.
 // 2014/05/22 14:18:31 certificate is revoked via CRL
-// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt
+// 2014/05/22 14:18:31 Revoked certificate: misc/intermediate_ca/MobileArmorEnterpriseCA.crt.
 var revokedCert = mustParse(`-----BEGIN CERTIFICATE-----
 MIIEEzCCAvugAwIBAgILBAAAAAABGMGjftYwDQYJKoZIhvcNAQEFBQAwcTEoMCYG
 A1UEAxMfR2xvYmFsU2lnbiBSb290U2lnbiBQYXJ0bmVycyBDQTEdMBsGA1UECxMU
@@ -106,7 +107,7 @@ Kz5vh+5tmytUPKA8hUgmLWe94lMb7Uqq2wgZKsqun5DAWleKu81w7wEcOrjiiB+x
 jeBHq7OnpWm+ccTOPCE6H4ZN4wWVS7biEBUdop/8HgXBPQHWAdjL
 -----END CERTIFICATE-----`)
 
-// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url
+// A Comodo intermediate CA certificate with issuer url, CRL url and OCSP url.
 var goodComodoCA = (`-----BEGIN CERTIFICATE-----
 MIIGCDCCA/CgAwIBAgIQKy5u6tl1NmwUim7bo3yMBzANBgkqhkiG9w0BAQwFADCB
 hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
@@ -153,7 +154,7 @@ func mustParse(pemData string) *x509.Certificate {
 		panic("Invalid PEM type.")
 	}
 
-	cert, err := x509.ParseCertificate([]byte(block.Bytes))
+	cert, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
 		panic(err.Error())
 	}
@@ -182,7 +183,6 @@ func TestGood(t *testing.T) {
 	} else if revoked {
 		t.Fatalf("good certificate should not have been marked as revoked")
 	}
-
 }
 
 func TestLdap(t *testing.T) {
@@ -230,7 +230,6 @@ func TestBadCRLSet(t *testing.T) {
 		t.Fatalf("key emptystring should be deleted from CRLSet")
 	}
 	delete(CRLSet, "")
-
 }
 
 func TestCachedCRLSet(t *testing.T) {
@@ -241,13 +240,11 @@ func TestCachedCRLSet(t *testing.T) {
 }
 
 func TestRemoteFetchError(t *testing.T) {
-
 	badurl := ":"
 
 	if _, err := fetchRemote(badurl); err == nil {
 		t.Fatalf("fetching bad url should result in non-nil error")
 	}
-
 }
 
 func TestNoOCSPServers(t *testing.T) {
157	certlib/ski/ski.go	Normal file
@@ -0,0 +1,157 @@
+package ski
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/sha1" // #nosec G505 this is the standard
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"encoding/pem"
+	"fmt"
+	"os"
+
+	"git.wntrmute.dev/kyle/goutils/certlib"
+	"git.wntrmute.dev/kyle/goutils/die"
+	"git.wntrmute.dev/kyle/goutils/lib"
+)
+
+const (
+	keyTypeRSA     = "RSA"
+	keyTypeECDSA   = "ECDSA"
+	keyTypeEd25519 = "Ed25519"
+)
+
+type subjectPublicKeyInfo struct {
+	Algorithm        pkix.AlgorithmIdentifier
+	SubjectPublicKey asn1.BitString
+}
+
+type KeyInfo struct {
+	PublicKey []byte
+	KeyType   string
+	FileType  string
+}
+
+func (k *KeyInfo) String() string {
+	return fmt.Sprintf("%s (%s)", lib.HexEncode(k.PublicKey, lib.HexEncodeLowerColon), k.KeyType)
+}
+
+func (k *KeyInfo) SKI(displayMode lib.HexEncodeMode) (string, error) {
+	var subPKI subjectPublicKeyInfo
+
+	_, err := asn1.Unmarshal(k.PublicKey, &subPKI)
+	if err != nil {
+		return "", fmt.Errorf("serializing SKI: %w", err)
+	}
+
+	pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes) // #nosec G401 this is the standard
+	pubHashString := lib.HexEncode(pubHash[:], displayMode)
+
+	return pubHashString, nil
+}
+
+// ParsePEM parses a PEM file and returns the public key and its type.
+func ParsePEM(path string) (*KeyInfo, error) {
+	material := &KeyInfo{}
+
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("parsing X.509 material %s: %w", path, err)
+	}
+
+	data = bytes.TrimSpace(data)
+	p, rest := pem.Decode(data)
+	if len(rest) > 0 {
+		lib.Warnx("trailing data in PEM file")
+	}
+
+	if p == nil {
+		return nil, fmt.Errorf("no PEM data in %s", path)
+	}
+
+	data = p.Bytes
+
+	switch p.Type {
+	case "PRIVATE KEY", "RSA PRIVATE KEY", "EC PRIVATE KEY":
+		material.PublicKey, material.KeyType = parseKey(data)
+		material.FileType = "private key"
+	case "CERTIFICATE":
+		material.PublicKey, material.KeyType = parseCertificate(data)
+		material.FileType = "certificate"
+	case "CERTIFICATE REQUEST":
+		material.PublicKey, material.KeyType = parseCSR(data)
+		material.FileType = "certificate request"
+	default:
+		return nil, fmt.Errorf("unknown PEM type %s", p.Type)
+	}
+
+	return material, nil
+}
+
+func parseKey(data []byte) ([]byte, string) {
+	priv, err := certlib.ParsePrivateKeyDER(data)
+	if err != nil {
+		die.If(err)
+	}
+
+	var kt string
+	switch priv.Public().(type) {
+	case *rsa.PublicKey:
+		kt = keyTypeRSA
+	case *ecdsa.PublicKey:
+		kt = keyTypeECDSA
+	default:
+		die.With("unknown private key type %T", priv)
+	}
+
+	public, err := x509.MarshalPKIXPublicKey(priv.Public())
+	die.If(err)
+
+	return public, kt
+}
+
+func parseCertificate(data []byte) ([]byte, string) {
+	cert, err := x509.ParseCertificate(data)
+	die.If(err)
+
+	pub := cert.PublicKey
+	var kt string
+	switch pub.(type) {
+	case *rsa.PublicKey:
+		kt = keyTypeRSA
+	case *ecdsa.PublicKey:
+		kt = keyTypeECDSA
+	case *ed25519.PublicKey:
+		kt = keyTypeEd25519
+	default:
+		die.With("unknown public key type %T", pub)
+	}
+
+	public, err := x509.MarshalPKIXPublicKey(pub)
+	die.If(err)
+	return public, kt
+}
+
+func parseCSR(data []byte) ([]byte, string) {
+	// Use certlib to support both PEM and DER and to centralize validation.
+	csr, _, err := certlib.ParseCSR(data)
+	die.If(err)
+
+	pub := csr.PublicKey
+	var kt string
+	switch pub.(type) {
+	case *rsa.PublicKey:
+		kt = keyTypeRSA
+	case *ecdsa.PublicKey:
+		kt = keyTypeECDSA
+	default:
+		die.With("unknown public key type %T", pub)
+	}
+
+	public, err := x509.MarshalPKIXPublicKey(pub)
+	die.If(err)
+	return public, kt
+}
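A minimal sketch of the new ski package in use; the path is a placeholder, and the colon-separated lower-case display mode is taken from its use in KeyInfo.String above.

package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib/ski"
	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	info, err := ski.ParsePEM("server.pem") // placeholder path: cert, CSR, or private key
	if err != nil {
		log.Fatal(err)
	}

	id, err := info.SKI(lib.HexEncodeLowerColon)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s %s: SKI %s\n", info.FileType, info.KeyType, id)
}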
49	certlib/verify/check.go	Normal file
@@ -0,0 +1,49 @@
+package verify
+
+import (
+	"crypto/x509"
+	"fmt"
+	"time"
+
+	"git.wntrmute.dev/kyle/goutils/certlib/dump"
+)
+
+const DefaultLeeway = 2160 * time.Hour // three months
+
+type CertCheck struct {
+	Cert *x509.Certificate
+	leeway time.Duration
+}
+
+func NewCertCheck(cert *x509.Certificate, leeway time.Duration) *CertCheck {
+	return &CertCheck{
+		Cert: cert,
+		leeway: leeway,
+	}
+}
+
+func (c CertCheck) Expiry() time.Duration {
+	return time.Until(c.Cert.NotAfter)
+}
+
+func (c CertCheck) IsExpiring(leeway time.Duration) bool {
+	return c.Expiry() < leeway
+}
+
+// Err returns nil if the certificate is not expiring within the leeway period.
+func (c CertCheck) Err() error {
+	if !c.IsExpiring(c.leeway) {
+		return nil
+	}
+
+	return fmt.Errorf("%s expires in %s", dump.DisplayName(c.Cert.Subject), c.Expiry())
+}
+
+func (c CertCheck) Name() string {
+	return fmt.Sprintf("%s/SN=%s", dump.DisplayName(c.Cert.Subject),
+		c.Cert.SerialNumber)
+}
+
+func (c CertCheck) String() string {
+	return fmt.Sprintf("%s expires on %s (in %s)\n", c.Name(), c.Cert.NotAfter, c.Expiry())
+}
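A small sketch of the CertCheck helper guarding against soon-to-expire certificates, using the DefaultLeeway defined above; the surrounding package name is arbitrary.

package certwatch

import (
	"crypto/x509"
	"fmt"
	"time"

	"git.wntrmute.dev/kyle/goutils/certlib/verify"
)

// WarnIfExpiring prints a warning when cert falls inside the expiry leeway.
func WarnIfExpiring(cert *x509.Certificate) {
	check := verify.NewCertCheck(cert, verify.DefaultLeeway)
	if err := check.Err(); err != nil {
		fmt.Println("warning:", err)
		return
	}
	fmt.Printf("%s is valid for another %s\n", check.Name(), check.Expiry().Round(time.Hour))
}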
143	certlib/verify/verify.go	Normal file
@@ -0,0 +1,143 @@
+package verify
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io"
+
+	"git.wntrmute.dev/kyle/goutils/certlib/revoke"
+	"git.wntrmute.dev/kyle/goutils/lib"
+)
+
+func bundleIntermediates(w io.Writer, chain []*x509.Certificate, pool *x509.CertPool, verbose bool) *x509.CertPool {
+	for _, intermediate := range chain[1:] {
+		if verbose {
+			fmt.Fprintf(w, "[+] adding intermediate with SKI %x\n", intermediate.SubjectKeyId)
+		}
+		pool.AddCert(intermediate)
+	}
+
+	return pool
+}
+
+type Opts struct {
+	Verbose bool
+	Config *tls.Config
+	Intermediates *x509.CertPool
+	ForceIntermediates bool
+	CheckRevocation bool
+	KeyUsages []x509.ExtKeyUsage
+}
+
+type verifyResult struct {
+	chain []*x509.Certificate
+	roots *x509.CertPool
+	ints *x509.CertPool
+}
+
+func prepareVerification(w io.Writer, target string, opts *Opts) (*verifyResult, error) {
+	var (
+		roots, ints *x509.CertPool
+		err error
+	)
+
+	if opts == nil {
+		opts = &Opts{
+			Config: lib.StrictBaselineTLSConfig(),
+			ForceIntermediates: false,
+		}
+	}
+
+	if opts.Config.RootCAs == nil {
+		roots, err = x509.SystemCertPool()
+		if err != nil {
+			return nil, fmt.Errorf("couldn't load system cert pool: %w", err)
+		}
+
+		opts.Config.RootCAs = roots
+	}
+
+	if opts.Intermediates == nil {
+		ints = x509.NewCertPool()
+	} else {
+		ints = opts.Intermediates.Clone()
+	}
+
+	roots = opts.Config.RootCAs.Clone()
+
+	chain, err := lib.GetCertificateChain(target, opts.Config)
+	if err != nil {
+		return nil, fmt.Errorf("fetching certificate chain: %w", err)
+	}
+
+	if opts.Verbose {
+		fmt.Fprintf(w, "[+] %s has %d certificates\n", target, len(chain))
+	}
+
+	if len(chain) > 1 && opts.ForceIntermediates {
+		ints = bundleIntermediates(w, chain, ints, opts.Verbose)
+	}
+
+	return &verifyResult{
+		chain: chain,
+		roots: roots,
+		ints: ints,
+	}, nil
+}
+
+// Chain fetches the certificate chain for a target and verifies it.
+func Chain(w io.Writer, target string, opts *Opts) ([]*x509.Certificate, error) {
+	result, err := prepareVerification(w, target, opts)
+	if err != nil {
+		return nil, fmt.Errorf("certificate verification failed: %w", err)
+	}
+
+	chains, err := CertWith(result.chain[0], result.roots, result.ints, opts.CheckRevocation, opts.KeyUsages...)
+	if err != nil {
+		return nil, fmt.Errorf("certificate verification failed: %w", err)
+	}
+
+	return chains, nil
+}
+
+// CertWith verifies a certificate against a set of roots and intermediates.
+func CertWith(
+	cert *x509.Certificate,
+	roots, ints *x509.CertPool,
+	checkRevocation bool,
+	keyUses ...x509.ExtKeyUsage,
+) ([]*x509.Certificate, error) {
+	if len(keyUses) == 0 {
+		keyUses = []x509.ExtKeyUsage{x509.ExtKeyUsageAny}
+	}
+
+	opts := x509.VerifyOptions{
+		Intermediates: ints,
+		Roots: roots,
+		KeyUsages: keyUses,
+	}
+
+	chains, err := cert.Verify(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	if checkRevocation {
+		revoked, ok := revoke.VerifyCertificate(cert)
+		if !ok {
+			return nil, errors.New("failed to check certificate revocation status")
+		}
+
+		if revoked {
+			return nil, errors.New("certificate is revoked")
+		}
+	}
+
+	if len(chains) == 0 {
+		return nil, errors.New("no valid certificate chain found")
+	}
+
+	return chains[0], nil
+}
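A usage sketch for verify.Chain as defined above. As written, Chain dereferences both opts and opts.Config, so the sketch supplies both; lib.StrictBaselineTLSConfig is taken from its use in prepareVerification, and the target host is a placeholder.

package main

import (
	"fmt"
	"log"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/verify"
	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	opts := &verify.Opts{
		Verbose: true,
		CheckRevocation: true,
		Config: lib.StrictBaselineTLSConfig(),
	}

	chain, err := verify.Chain(os.Stdout, "example.net:443", opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("verified chain of %d certificate(s)\n", len(chain))
}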
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"net"
@@ -28,10 +29,16 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
 
 	if verbose {
 		fmt.Printf("connecting to %s/%s... ", addr, proto)
-		os.Stdout.Sync()
+		if err = os.Stdout.Sync(); err != nil {
+			return err
+		}
 	}
 
-	conn, err := net.DialTimeout(proto, addr, timeout)
+	dialer := &net.Dialer{
+		Timeout: timeout,
+	}
+
+	conn, err := dialer.DialContext(context.Background(), proto, addr)
 	if err != nil {
 		if verbose {
 			fmt.Println("failed.")
@@ -42,8 +49,8 @@ func connect(addr string, dport string, six bool, timeout time.Duration) error {
 	if verbose {
 		fmt.Println("OK")
 	}
-	conn.Close()
-	return nil
+
+	return conn.Close()
 }
 
 func main() {
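The change above swaps net.DialTimeout for a net.Dialer plus DialContext. The same pattern in isolation, with a context deadline layered on top; the host and timeouts are arbitrary.

package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	dialer := &net.Dialer{Timeout: 5 * time.Second}

	// A context deadline composes with the dialer's own timeout;
	// whichever expires first aborts the dial.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	conn, err := dialer.DialContext(ctx, "tcp", "example.net:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}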
@@ -3,6 +3,7 @@ package main
 import (
 	"crypto/x509"
 	"embed"
+	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -14,22 +15,22 @@ import (
 // loadCertsFromFile attempts to parse certificates from a file that may be in
 // PEM or DER/PKCS#7 format. Returns the parsed certificates or an error.
 func loadCertsFromFile(path string) ([]*x509.Certificate, error) {
+	var certs []*x509.Certificate
+
 	data, err := os.ReadFile(path)
 	if err != nil {
 		return nil, err
 	}
 
-	// Try PEM first
-	if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
+	if certs, err = certlib.ParseCertificatesPEM(data); err == nil {
 		return certs, nil
 	}
 
-	// Try DER/PKCS7/PKCS12 (with no password)
-	if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
+	if certs, _, err = certlib.ParseCertificatesDER(data, ""); err == nil {
 		return certs, nil
-	} else {
-		return nil, err
 	}
+
+	return nil, err
 }
 
 func makePoolFromFile(path string) (*x509.CertPool, error) {
@@ -56,49 +57,50 @@ var embeddedTestdata embed.FS
 // loadCertsFromBytes attempts to parse certificates from bytes that may be in
 // PEM or DER/PKCS#7 format.
 func loadCertsFromBytes(data []byte) ([]*x509.Certificate, error) {
-	// Try PEM first
-	if certs, err := certlib.ParseCertificatesPEM(data); err == nil {
+	certs, err := certlib.ParseCertificatesPEM(data)
+	if err == nil {
 		return certs, nil
 	}
-	// Try DER/PKCS7/PKCS12 (with no password)
-	if certs, _, err := certlib.ParseCertificatesDER(data, ""); err == nil {
+
+	certs, _, err = certlib.ParseCertificatesDER(data, "")
+	if err == nil {
 		return certs, nil
-	} else {
-		return nil, err
 	}
+
+	return nil, err
 }
 
 func makePoolFromBytes(data []byte) (*x509.CertPool, error) {
 	certs, err := loadCertsFromBytes(data)
 	if err != nil || len(certs) == 0 {
-		return nil, fmt.Errorf("failed to load CA certificates from embedded bytes")
+		return nil, errors.New("failed to load CA certificates from embedded bytes")
 	}
 	pool := x509.NewCertPool()
 	for _, c := range certs {
 		pool.AddCert(c)
 	}
 	return pool, nil
 }
 
 // isSelfSigned returns true if the given certificate is self-signed.
 // It checks that the subject and issuer match and that the certificate's
 // signature verifies against its own public key.
 func isSelfSigned(cert *x509.Certificate) bool {
 	if cert == nil {
 		return false
 	}
 	// Quick check: subject and issuer match
 	if cert.Subject.String() != cert.Issuer.String() {
 		return false
 	}
 	// Cryptographic check: the certificate is signed by itself
 	if err := cert.CheckSignatureFrom(cert); err != nil {
 		return false
 	}
 	return true
 }
 
-func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string) {
+func verifyAgainstCA(caPool *x509.CertPool, path string) (bool, string) {
 	certs, err := loadCertsFromFile(path)
 	if err != nil || len(certs) == 0 {
 		return false, ""
@@ -117,14 +119,14 @@ func verifyAgainstCA(caPool *x509.CertPool, path string) (ok bool, expiry string
 		Intermediates: ints,
 		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
 	}
-	if _, err := leaf.Verify(opts); err != nil {
+	if _, err = leaf.Verify(opts); err != nil {
 		return false, ""
 	}
 
 	return true, leaf.NotAfter.Format("2006-01-02")
 }
 
-func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expiry string) {
+func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (bool, string) {
 	certs, err := loadCertsFromBytes(certData)
 	if err != nil || len(certs) == 0 {
 		return false, ""
@@ -143,92 +145,159 @@ func verifyAgainstCABytes(caPool *x509.CertPool, certData []byte) (ok bool, expi
|
|||||||
Intermediates: ints,
|
Intermediates: ints,
|
||||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
||||||
}
|
}
|
||||||
if _, err := leaf.Verify(opts); err != nil {
|
if _, err = leaf.Verify(opts); err != nil {
|
||||||
return false, ""
|
return false, ""
|
||||||
}
|
}
|
||||||
|
|
||||||
return true, leaf.NotAfter.Format("2006-01-02")
|
return true, leaf.NotAfter.Format("2006-01-02")
|
||||||
}
|
}
|
||||||
|
|
||||||
// selftest runs built-in validation using embedded certificates.
|
type testCase struct {
|
||||||
func selftest() int {
|
name string
|
||||||
type testCase struct {
|
caFile string
|
||||||
name string
|
certFile string
|
||||||
caFile string
|
expectOK bool
|
||||||
certFile string
|
}
|
||||||
expectOK bool
|
|
||||||
|
func (tc testCase) Run() error {
|
||||||
|
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.caFile, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cases := []testCase{
|
certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
|
||||||
{name: "ISRG Root X1 validates LE E7", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/le-e7.pem", expectOK: true},
|
if err != nil {
|
||||||
{name: "ISRG Root X1 does NOT validate Google WR2", caFile: "testdata/isrg-root-x1.pem", certFile: "testdata/goog-wr2.pem", expectOK: false},
|
return fmt.Errorf("selftest: failed to read embedded %s: %w", tc.certFile, err)
|
||||||
{name: "GTS R1 validates Google WR2", caFile: "testdata/gts-r1.pem", certFile: "testdata/goog-wr2.pem", expectOK: true},
|
}
|
||||||
{name: "GTS R1 does NOT validate LE E7", caFile: "testdata/gts-r1.pem", certFile: "testdata/le-e7.pem", expectOK: false},
|
|
||||||
}
|
|
||||||
|
|
||||||
failures := 0
|
pool, err := makePoolFromBytes(caBytes)
|
||||||
for _, tc := range cases {
|
if err != nil || pool == nil {
|
||||||
caBytes, err := embeddedTestdata.ReadFile(tc.caFile)
|
return fmt.Errorf("selftest: failed to build CA pool for %s: %w", tc.caFile, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ok, exp := verifyAgainstCABytes(pool, certBytes)
|
||||||
|
if ok != tc.expectOK {
|
||||||
|
return fmt.Errorf("%s: unexpected result: got %v, want %v", tc.name, ok, tc.expectOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok {
|
||||||
|
fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("%s: INVALID (as expected)\n", tc.name)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cases = []testCase{
|
||||||
|
{
|
||||||
|
name: "ISRG Root X1 validates LE E7",
|
||||||
|
caFile: "testdata/isrg-root-x1.pem",
|
||||||
|
certFile: "testdata/le-e7.pem",
|
||||||
|
expectOK: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ISRG Root X1 does NOT validate Google WR2",
|
||||||
|
caFile: "testdata/isrg-root-x1.pem",
|
||||||
|
certFile: "testdata/goog-wr2.pem",
|
||||||
|
expectOK: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "GTS R1 validates Google WR2",
|
||||||
|
caFile: "testdata/gts-r1.pem",
|
||||||
|
certFile: "testdata/goog-wr2.pem",
|
||||||
|
expectOK: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "GTS R1 does NOT validate LE E7",
|
||||||
|
caFile: "testdata/gts-r1.pem",
|
||||||
|
certFile: "testdata/le-e7.pem",
|
||||||
|
expectOK: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
+// selftest runs built-in validation using embedded certificates.
+func selftest() int {
+	failures := 0
+
+	for _, tc := range cases {
+		err := tc.Run()
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.caFile, err)
+			fmt.Fprintln(os.Stderr, err)
 			failures++
 			continue
 		}
-		certBytes, err := embeddedTestdata.ReadFile(tc.certFile)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", tc.certFile, err)
-			failures++
-			continue
-		}
-		pool, err := makePoolFromBytes(caBytes)
-		if err != nil || pool == nil {
-			fmt.Fprintf(os.Stderr, "selftest: failed to build CA pool for %s: %v\n", tc.caFile, err)
-			failures++
-			continue
-		}
-		ok, exp := verifyAgainstCABytes(pool, certBytes)
-		if ok != tc.expectOK {
-			fmt.Printf("%s: unexpected result: got %v, want %v\n", tc.name, ok, tc.expectOK)
-			failures++
-		} else {
-			if ok {
-				fmt.Printf("%s: OK (expires %s)\n", tc.name, exp)
-			} else {
-				fmt.Printf("%s: INVALID (as expected)\n", tc.name)
-			}
-		}
 	}
 
 	// Verify that both embedded root CAs are detected as self-signed
 	roots := []string{"testdata/gts-r1.pem", "testdata/isrg-root-x1.pem"}
 	for _, root := range roots {
 		b, err := embeddedTestdata.ReadFile(root)
 		if err != nil {
 			fmt.Fprintf(os.Stderr, "selftest: failed to read embedded %s: %v\n", root, err)
 			failures++
 			continue
 		}
 		certs, err := loadCertsFromBytes(b)
 		if err != nil || len(certs) == 0 {
 			fmt.Fprintf(os.Stderr, "selftest: failed to parse cert(s) from %s: %v\n", root, err)
 			failures++
 			continue
 		}
 		leaf := certs[0]
 		if isSelfSigned(leaf) {
 			fmt.Printf("%s: SELF-SIGNED (as expected)\n", root)
 		} else {
 			fmt.Printf("%s: expected SELF-SIGNED, but was not detected as such\n", root)
 			failures++
 		}
 	}
 
 	if failures == 0 {
 		fmt.Println("selftest: PASS")
 		return 0
 	}
 	fmt.Fprintf(os.Stderr, "selftest: FAIL (%d failure(s))\n", failures)
 	return 1
 }
 
+// expiryString returns a YYYY-MM-DD date string to display for certificate
+// expiry. If an explicit exp string is provided, it is used. Otherwise, if a
+// leaf certificate is available, its NotAfter is formatted. As a last resort,
+// it falls back to today's date (should not normally happen).
+func expiryString(leaf *x509.Certificate, exp string) string {
+	if exp != "" {
+		return exp
+	}
+	if leaf != nil {
+		return leaf.NotAfter.Format("2006-01-02")
+	}
+	return time.Now().Format("2006-01-02")
+}
+
+// processCert verifies a single certificate file against the provided CA pool
+// and prints the result in the required format, handling self-signed
+// certificates specially.
+func processCert(caPool *x509.CertPool, certPath string) {
+	ok, exp := verifyAgainstCA(caPool, certPath)
+	name := filepath.Base(certPath)
+
+	// Try to load the leaf cert for self-signed detection and expiry fallback
+	var leaf *x509.Certificate
+	if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
+		leaf = certs[0]
+	}
+
+	// Prefer the SELF-SIGNED label if applicable
+	if isSelfSigned(leaf) {
+		fmt.Printf("%s: SELF-SIGNED\n", name)
+		return
+	}
+
+	if ok {
+		fmt.Printf("%s: OK (expires %s)\n", name, expiryString(leaf, exp))
+		return
+	}
+	fmt.Printf("%s: INVALID\n", name)
+}
 
 func main() {
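Both selftest and processCert call an isSelfSigned helper whose definition falls outside the lines shown in this diff. A minimal sketch of what such a check typically looks like is below; this is an illustration, not the project's actual implementation.

``` go
package main

import (
	"bytes"
	"crypto/x509"
)

// isSelfSigned reports whether cert appears to be self-signed: the raw
// subject and issuer match, and the certificate verifies its own signature.
// Sketch only; the helper used by this tool is defined elsewhere.
func isSelfSigned(cert *x509.Certificate) bool {
	if cert == nil {
		return false
	}
	if !bytes.Equal(cert.RawSubject, cert.RawIssuer) {
		return false
	}
	return cert.CheckSignatureFrom(cert) == nil
}
```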
@@ -250,38 +319,7 @@ func main()
 	os.Exit(1)
 }
 
 	for _, certPath := range os.Args[2:] {
-		ok, exp := verifyAgainstCA(caPool, certPath)
-		name := filepath.Base(certPath)
-		// Load the leaf once for self-signed detection and potential expiry fallback
-		var leaf *x509.Certificate
-		if certs, err := loadCertsFromFile(certPath); err == nil && len(certs) > 0 {
-			leaf = certs[0]
-		}
-
-		// If the certificate is self-signed, prefer the SELF-SIGNED label
-		if isSelfSigned(leaf) {
-			fmt.Printf("%s: SELF-SIGNED\n", name)
-			continue
-		}
-
-		if ok {
-			// Display with the requested format
-			// Example: file: OK (expires 2031-01-01)
-			// Ensure deterministic date formatting
-			// Note: no timezone displayed; date only as per example
-			// If exp ended up empty for some reason, recompute safely
-			if exp == "" {
-				if leaf != nil {
-					exp = leaf.NotAfter.Format("2006-01-02")
-				} else {
-					// fallback to the current date to avoid empty; though shouldn't happen
-					exp = time.Now().Format("2006-01-02")
-				}
-			}
-			fmt.Printf("%s: OK (expires %s)\n", name, exp)
-		} else {
-			fmt.Printf("%s: INVALID\n", name)
-		}
+		processCert(caPool, certPath)
 	}
 }
28 cmd/cert-bundler/Dockerfile (Normal file)
@@ -0,0 +1,28 @@
# Build and runtime image for cert-bundler
# Usage (from repo root or cmd/cert-bundler directory):
#   docker build -t cert-bundler:latest -f cmd/cert-bundler/Dockerfile .
#   docker run --rm -v "$PWD":/work cert-bundler:latest
# This expects a /work/bundle.yaml file in the mounted directory and
# will write generated bundles to /work/bundle.

# Build stage
FROM golang:1.24.3-alpine AS build
WORKDIR /src

# Copy go module files and download dependencies first for better caching
RUN go install git.wntrmute.dev/kyle/goutils/cmd/cert-bundler@v1.13.2 && \
    mv /go/bin/cert-bundler /usr/local/bin/cert-bundler

# Runtime stage (kept as golang:alpine per requirement)
FROM golang:1.24.3-alpine

# Create a work directory that users will typically mount into
WORKDIR /work
VOLUME ["/work"]

# Copy the built binary from the builder stage
COPY --from=build /usr/local/bin/cert-bundler /usr/local/bin/cert-bundler

# Default command: read bundle.yaml from current directory and output to ./bundle
ENTRYPOINT ["/usr/local/bin/cert-bundler"]
CMD ["-c", "/work/bundle.yaml", "-o", "/work/bundle"]
@@ -1,64 +1,19 @@
 package main
 
 import (
-	"archive/tar"
-	"archive/zip"
-	"compress/gzip"
-	"crypto/sha256"
-	"crypto/x509"
 	_ "embed"
-	"encoding/pem"
 	"flag"
 	"fmt"
 	"os"
-	"path/filepath"
-	"strings"
-	"time"
 
-	"git.wntrmute.dev/kyle/goutils/certlib"
-	"gopkg.in/yaml.v2"
+	"git.wntrmute.dev/kyle/goutils/certlib/bundler"
 )
 
-// Config represents the top-level YAML configuration
-type Config struct {
-	Config struct {
-		Hashes string `yaml:"hashes"`
-		Expiry string `yaml:"expiry"`
-	} `yaml:"config"`
-	Chains map[string]ChainGroup `yaml:"chains"`
-}
-
-// ChainGroup represents a named group of certificate chains
-type ChainGroup struct {
-	Certs []CertChain `yaml:"certs"`
-	Outputs Outputs `yaml:"outputs"`
-}
-
-// CertChain represents a root certificate and its intermediates
-type CertChain struct {
-	Root string `yaml:"root"`
-	Intermediates []string `yaml:"intermediates"`
-}
-
-// Outputs defines output format options
-type Outputs struct {
-	IncludeSingle bool `yaml:"include_single"`
-	IncludeIndividual bool `yaml:"include_individual"`
-	Manifest bool `yaml:"manifest"`
-	Formats []string `yaml:"formats"`
-	Encoding string `yaml:"encoding"`
-}
-
 var (
 	configFile string
 	outputDir  string
 )
 
-var formatExtensions = map[string]string{
-	"zip": ".zip",
-	"tgz": ".tar.gz",
-}
-
 //go:embed README.txt
 var readmeContent string
 
@@ -77,452 +32,10 @@ func main()
 		os.Exit(1)
 	}
 
-	// Load and parse configuration
-	cfg, err := loadConfig(configFile)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error loading config: %v\n", err)
+	if err := bundler.Run(configFile, outputDir); err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
 		os.Exit(1)
 	}
 
-	// Parse expiry duration (default 1 year)
-	expiryDuration := 365 * 24 * time.Hour
-	if cfg.Config.Expiry != "" {
-		expiryDuration, err = parseDuration(cfg.Config.Expiry)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Error parsing expiry: %v\n", err)
-			os.Exit(1)
-		}
-	}
-
-	// Create output directory if it doesn't exist
-	if err := os.MkdirAll(outputDir, 0755); err != nil {
-		fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
-		os.Exit(1)
-	}
-
-	// Process each chain group
-	// Pre-allocate createdFiles based on total number of formats across all groups
-	totalFormats := 0
-	for _, group := range cfg.Chains {
-		totalFormats += len(group.Outputs.Formats)
-	}
-	createdFiles := make([]string, 0, totalFormats)
-	for groupName, group := range cfg.Chains {
-		files, err := processChainGroup(groupName, group, expiryDuration)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, err)
-			os.Exit(1)
-		}
-		createdFiles = append(createdFiles, files...)
-	}
-
-	// Generate hash file for all created archives
-	if cfg.Config.Hashes != "" {
-		hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
-		if err := generateHashFile(hashFile, createdFiles); err != nil {
-			fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", err)
-			os.Exit(1)
-		}
-	}
-
 	fmt.Println("Certificate bundling completed successfully")
 }
 
-func loadConfig(path string) (*Config, error) {
-	data, err := os.ReadFile(path)
-	if err != nil {
-		return nil, err
-	}
-
-	var cfg Config
-	if err := yaml.Unmarshal(data, &cfg); err != nil {
-		return nil, err
-	}
-
-	return &cfg, nil
-}
-
-func parseDuration(s string) (time.Duration, error) {
-	// Support simple formats like "1y", "6m", "30d"
-	if len(s) < 2 {
-		return 0, fmt.Errorf("invalid duration format: %s", s)
-	}
-
-	unit := s[len(s)-1]
-	value := s[:len(s)-1]
-
-	var multiplier time.Duration
-	switch unit {
-	case 'y', 'Y':
-		multiplier = 365 * 24 * time.Hour
-	case 'm', 'M':
-		multiplier = 30 * 24 * time.Hour
-	case 'd', 'D':
-		multiplier = 24 * time.Hour
-	default:
-		return time.ParseDuration(s)
-	}
-
-	var num int
-	_, err := fmt.Sscanf(value, "%d", &num)
-	if err != nil {
-		return 0, fmt.Errorf("invalid duration value: %s", s)
-	}
-
-	return time.Duration(num) * multiplier, nil
-}
-
-func processChainGroup(groupName string, group ChainGroup, expiryDuration time.Duration) ([]string, error) {
-	// Default encoding to "pem" if not specified
-	encoding := group.Outputs.Encoding
-	if encoding == "" {
-		encoding = "pem"
-	}
-
-	// Collect certificates from all chains in the group
-	singleFileCerts, individualCerts, err := loadAndCollectCerts(group.Certs, group.Outputs, expiryDuration)
-	if err != nil {
-		return nil, err
-	}
-
-	// Prepare files for inclusion in archives
-	archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, group.Outputs, encoding)
-	if err != nil {
-		return nil, err
-	}
-
-	// Create archives for the entire group
-	createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles)
-	if err != nil {
-		return nil, err
-	}
-
-	return createdFiles, nil
-}
-
-// loadAndCollectCerts loads all certificates from chains and collects them for processing
-func loadAndCollectCerts(chains []CertChain, outputs Outputs, expiryDuration time.Duration) ([]*x509.Certificate, []certWithPath, error) {
-	var singleFileCerts []*x509.Certificate
-	var individualCerts []certWithPath
-
-	for _, chain := range chains {
-		// Load root certificate
-		rootCert, err := certlib.LoadCertificate(chain.Root)
-		if err != nil {
-			return nil, nil, fmt.Errorf("failed to load root certificate %s: %v", chain.Root, err)
-		}
-
-		// Check expiry for root
-		checkExpiry(chain.Root, rootCert, expiryDuration)
-
-		// Add root to collections if needed
-		if outputs.IncludeSingle {
-			singleFileCerts = append(singleFileCerts, rootCert)
-		}
-		if outputs.IncludeIndividual {
-			individualCerts = append(individualCerts, certWithPath{
-				cert: rootCert,
-				path: chain.Root,
-			})
-		}
-
-		// Load and validate intermediates
-		for _, intPath := range chain.Intermediates {
-			intCert, err := certlib.LoadCertificate(intPath)
-			if err != nil {
-				return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %v", intPath, err)
-			}
-
-			// Validate that intermediate is signed by root
-			if err := intCert.CheckSignatureFrom(rootCert); err != nil {
-				return nil, nil, fmt.Errorf("intermediate %s is not properly signed by root %s: %v", intPath, chain.Root, err)
-			}
-
-			// Check expiry for intermediate
-			checkExpiry(intPath, intCert, expiryDuration)
-
-			// Add intermediate to collections if needed
-			if outputs.IncludeSingle {
-				singleFileCerts = append(singleFileCerts, intCert)
-			}
-			if outputs.IncludeIndividual {
-				individualCerts = append(individualCerts, certWithPath{
-					cert: intCert,
-					path: intPath,
-				})
-			}
-		}
-	}
-
-	return singleFileCerts, individualCerts, nil
-}
-
-// prepareArchiveFiles prepares all files to be included in archives
-func prepareArchiveFiles(singleFileCerts []*x509.Certificate, individualCerts []certWithPath, outputs Outputs, encoding string) ([]fileEntry, error) {
-	var archiveFiles []fileEntry
-
-	// Handle a single bundle file
-	if outputs.IncludeSingle && len(singleFileCerts) > 0 {
-		files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
-		if err != nil {
-			return nil, fmt.Errorf("failed to encode single bundle: %v", err)
-		}
-		archiveFiles = append(archiveFiles, files...)
-	}
-
-	// Handle individual files
-	if outputs.IncludeIndividual {
-		for _, cp := range individualCerts {
-			baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
-			files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
-			if err != nil {
-				return nil, fmt.Errorf("failed to encode individual cert %s: %v", cp.path, err)
-			}
-			archiveFiles = append(archiveFiles, files...)
-		}
-	}
-
-	// Generate manifest if requested
-	if outputs.Manifest {
-		manifestContent := generateManifest(archiveFiles)
-		archiveFiles = append(archiveFiles, fileEntry{
-			name: "MANIFEST",
-			content: manifestContent,
-		})
-	}
-
-	return archiveFiles, nil
-}
-
-// createArchiveFiles creates archive files in the specified formats
-func createArchiveFiles(groupName string, formats []string, archiveFiles []fileEntry) ([]string, error) {
-	createdFiles := make([]string, 0, len(formats))
-
-	for _, format := range formats {
-		ext, ok := formatExtensions[format]
-		if !ok {
-			return nil, fmt.Errorf("unsupported format: %s", format)
-		}
-		archivePath := filepath.Join(outputDir, groupName+ext)
-		switch format {
-		case "zip":
-			if err := createZipArchive(archivePath, archiveFiles); err != nil {
-				return nil, fmt.Errorf("failed to create zip archive: %v", err)
-			}
-		case "tgz":
-			if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
-				return nil, fmt.Errorf("failed to create tar.gz archive: %v", err)
-			}
-		default:
-			return nil, fmt.Errorf("unsupported format: %s", format)
-		}
-		createdFiles = append(createdFiles, archivePath)
-	}
-
-	return createdFiles, nil
-}
-
-func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
-	now := time.Now()
-	expiryThreshold := now.Add(expiryDuration)
-
-	if cert.NotAfter.Before(expiryThreshold) {
-		daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
-		if daysUntilExpiry < 0 {
-			fmt.Fprintf(os.Stderr, "WARNING: Certificate %s has EXPIRED (expired %d days ago)\n", path, -daysUntilExpiry)
-		} else {
-			fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
-		}
-	}
-}
-
-type fileEntry struct {
-	name string
-	content []byte
-}
-
-type certWithPath struct {
-	cert *x509.Certificate
-	path string
-}
-
-// encodeCertsToFiles converts certificates to file entries based on encoding type
-// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file
-func encodeCertsToFiles(certs []*x509.Certificate, baseName string, encoding string, isSingle bool) ([]fileEntry, error) {
-	var files []fileEntry
-
-	switch encoding {
-	case "pem":
-		pemContent := encodeCertsToPEM(certs)
-		files = append(files, fileEntry{
-			name: baseName + ".pem",
-			content: pemContent,
-		})
-	case "der":
-		if isSingle {
-			// For single file in DER, concatenate all cert DER bytes
-			var derContent []byte
-			for _, cert := range certs {
-				derContent = append(derContent, cert.Raw...)
-			}
-			files = append(files, fileEntry{
-				name: baseName + ".crt",
-				content: derContent,
-			})
-		} else {
-			// Individual DER file (should only have one cert)
-			if len(certs) > 0 {
-				files = append(files, fileEntry{
-					name: baseName + ".crt",
-					content: certs[0].Raw,
-				})
-			}
-		}
-	case "both":
-		// Add PEM version
-		pemContent := encodeCertsToPEM(certs)
-		files = append(files, fileEntry{
-			name: baseName + ".pem",
-			content: pemContent,
-		})
-		// Add DER version
-		if isSingle {
-			var derContent []byte
-			for _, cert := range certs {
-				derContent = append(derContent, cert.Raw...)
-			}
-			files = append(files, fileEntry{
-				name: baseName + ".crt",
-				content: derContent,
-			})
-		} else {
-			if len(certs) > 0 {
-				files = append(files, fileEntry{
-					name: baseName + ".crt",
-					content: certs[0].Raw,
-				})
-			}
-		}
-	default:
-		return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
-	}
-
-	return files, nil
-}
-
-// encodeCertsToPEM encodes certificates to PEM format
-func encodeCertsToPEM(certs []*x509.Certificate) []byte {
-	var pemContent []byte
-	for _, cert := range certs {
-		pemBlock := &pem.Block{
-			Type: "CERTIFICATE",
-			Bytes: cert.Raw,
-		}
-		pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
-	}
-	return pemContent
-}
-
-func generateManifest(files []fileEntry) []byte {
-	var manifest strings.Builder
-	for _, file := range files {
-		if file.name == "MANIFEST" {
-			continue
-		}
-		hash := sha256.Sum256(file.content)
-		manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
-	}
-	return []byte(manifest.String())
-}
-
-func createZipArchive(path string, files []fileEntry) error {
-	f, err := os.Create(path)
-	if err != nil {
-		return err
-	}
-
-	w := zip.NewWriter(f)
-
-	for _, file := range files {
-		fw, err := w.Create(file.name)
-		if err != nil {
-			w.Close()
-			f.Close()
-			return err
-		}
-		if _, err := fw.Write(file.content); err != nil {
-			w.Close()
-			f.Close()
-			return err
-		}
-	}
-
-	// Check errors on close operations
-	if err := w.Close(); err != nil {
-		f.Close()
-		return err
-	}
-	return f.Close()
-}
-
-func createTarGzArchive(path string, files []fileEntry) error {
-	f, err := os.Create(path)
-	if err != nil {
-		return err
-	}
-
-	gw := gzip.NewWriter(f)
-	tw := tar.NewWriter(gw)
-
-	for _, file := range files {
-		hdr := &tar.Header{
-			Name: file.name,
-			Mode: 0644,
-			Size: int64(len(file.content)),
-		}
-		if err := tw.WriteHeader(hdr); err != nil {
-			tw.Close()
-			gw.Close()
-			f.Close()
-			return err
-		}
-		if _, err := tw.Write(file.content); err != nil {
-			tw.Close()
-			gw.Close()
-			f.Close()
-			return err
-		}
-	}
-
-	// Check errors on close operations in the correct order
-	if err := tw.Close(); err != nil {
-		gw.Close()
-		f.Close()
-		return err
-	}
-	if err := gw.Close(); err != nil {
-		f.Close()
-		return err
-	}
-	return f.Close()
-}
-
-func generateHashFile(path string, files []string) error {
-	f, err := os.Create(path)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	for _, file := range files {
-		data, err := os.ReadFile(file)
-		if err != nil {
-			return err
-		}
-
-		hash := sha256.Sum256(data)
-		fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
-	}
-
-	return nil
-}
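The hunk above moves everything that used to live in cmd/cert-bundler/main.go into the certlib/bundler package; only the call site is visible in this diff, so the exact API is not shown here. A minimal consumer, assuming Run keeps the (configPath, outputDir) error signature used above, would look like this (paths are hard-coded placeholders):

``` go
package main

import (
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/bundler"
)

func main() {
	// Flag handling omitted; bundle.yaml and the bundle/ output directory
	// are illustrative values, matching the Dockerfile defaults above.
	if err := bundler.Run("bundle.yaml", "bundle"); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("Certificate bundling completed successfully")
}
```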
@@ -1,197 +0,0 @@
This project is an exploration into the utility of Jetbrains' Junie
to write smaller but tedious programs.

Task: build a certificate bundling tool in cmd/cert-bundler. It
creates archives of certificates chains.

A YAML file for this looks something like:

``` yaml
config:
  hashes: bundle.sha256
  expiry: 1y
chains:
  core_certs:
    certs:
      - root: roots/core-ca.pem
        intermediates:
          - int/cca1.pem
          - int/cca2.pem
          - int/cca3.pem
      - root: roots/ssh-ca.pem
        intermediates:
          - ssh/ssh_dmz1.pem
          - ssh/ssh_internal.pem
    outputs:
      include_single: true
      include_individual: true
      manifest: true
      formats:
        - zip
        - tgz
```

Some requirements:

1. First, all the certificates should be loaded.
2. For each root, each of the indivudal intermediates should be
   checked to make sure they are properly signed by the root CA.
3. The program should optionally take an expiration period (defaulting
   to one year), specified in config.expiration, and if any certificate
   is within that expiration period, a warning should be printed.
4. If outputs.include_single is true, all certificates under chains
   should be concatenated into a single file.
5. If outputs.include_individual is true, all certificates under
   chains should be included at the root level (e.g. int/cca2.pem
   would be cca2.pem in the archive).
6. If bundle.manifest is true, a "MANIFEST" file is created with
   SHA256 sums of each file included in the archive.
7. For each of the formats, create an archive file in the output
   directory (specified with `-o`) with that format.
   - If zip is included, create a .zip file.
   - If tgz is included, create a .tar.gz file with default compression
     levels.
   - All archive files should include any generated files (single
     and/or individual) in the top-level directory.
8. In the output directory, create a file with the same name as
   config.hashes that contains the SHA256 sum of all files created.

-----

The outputs.include_single and outputs.include_individual describe
what should go in the final archive. If both are specified, the output
archive should include both a single bundle.pem and each individual
certificate, for example.

-----

As it stands, given the following `bundle.yaml`:

``` yaml
config:
  hashes: bundle.sha256
  expiry: 1y
chains:
  core_certs:
    certs:
      - root: pems/gts-r1.pem
        intermediates:
          - pems/goog-wr2.pem
        outputs:
          include_single: true
          include_individual: true
          manifest: true
          formats:
            - zip
            - tgz
      - root: pems/isrg-root-x1.pem
        intermediates:
          - pems/le-e7.pem
        outputs:
          include_single: true
          include_individual: false
          manifest: true
          formats:
            - zip
            - tgz
  google_certs:
    certs:
      - root: pems/gts-r1.pem
        intermediates:
          - pems/goog-wr2.pem
    outputs:
      include_single: true
      include_individual: false
      manifest: true
      formats:
        - tgz
  lets_encrypt:
    certs:
      - root: pems/isrg-root-x1.pem
        intermediates:
          - pems/le-e7.pem
    outputs:
      include_single: false
      include_individual: true
      manifest: false
      formats:
        - zip
```

The program outputs the following files:

- bundle.sha256
- core_certs_0.tgz (contains individual certs)
- core_certs_0.zip (contains individual certs)
- core_certs_1.tgz (contains core_certs.pem)
- core_certs_1.zip (contains core_certs.pem)
- google_certs_0.tgz
- lets_encrypt_0.zip

It should output

- bundle.sha256
- core_certs.tgz
- core_certs.zip
- google_certs.tgz
- lets_encrypt.zip

core_certs.* should contain `bundle.pem` and all the individual
certs. There should be no _$n$ variants of archives.

-----

Add an additional field to outputs: encoding. It should accept one of
`der`, `pem`, or `both`. If `der`, certificates should be output as a
`.crt` file containing a DER-encoded certificate. If `pem`, certificates
should be output as a `.pem` file containing a PEM-encoded certificate.
If both, both the `.crt` and `.pem` certificate should be included.

For example, given the previous config, if `encoding` is der, the
google_certs.tgz archive should contain

- bundle.crt
- MANIFEST

Or with lets_encrypt.zip:

- isrg-root-x1.crt
- le-e7.crt

However, if `encoding` is pem, the lets_encrypt.zip archive should contain:

- isrg-root-x1.pem
- le-e7.pem

And if it `encoding` is both, the lets_encrypt.zip archive should contain:

- isrg-root-x1.crt
- isrg-root-x1.pem
- le-e7.crt
- le-e7.pem

-----

The tgz format should output a `.tar.gz` file instead of a `.tgz` file.

-----

Move the format extensions to a global variable.

-----

Write a README.txt with a description of the bundle.yaml format.

Additionally, update the help text for the program (e.g. with `-h`)
to provide the same detailed information.

-----

It may be easier to embed the README.txt in the program on build.

-----

For the archive (tar.gz and zip) writers, make sure errors are
checked at the end, and don't just defer the close operations.
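Requirement 3 in the removed notes is the expiry warning that checkExpiry implemented before the logic moved into certlib/bundler. As a worked example of the arithmetic, using the default one-year window and a hypothetical certificate that expires in 90 days:

``` go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "1y" in the removed parseDuration maps to 365 * 24h, not a calendar year.
	window := 365 * 24 * time.Hour

	// Suppose a certificate's NotAfter is 90 days from now.
	notAfter := time.Now().Add(90 * 24 * time.Hour)

	// It falls inside the warning window, so a warning is printed.
	if notAfter.Before(time.Now().Add(window)) {
		days := int(time.Until(notAfter).Hours() / 24)
		fmt.Printf("WARNING: certificate will expire in %d days (on %s)\n",
			days, notAfter.Format("2006-01-02"))
	}
}
```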
13 cmd/cert-bundler/testdata/bundle.yaml (vendored)
@@ -2,6 +2,19 @@ config:
   hashes: bundle.sha256
   expiry: 1y
 chains:
+  weird:
+    certs:
+      - root: pems/gts-r1.pem
+        intermediates:
+          - pems/goog-wr2.pem
+      - root: pems/isrg-root-x1.pem
+    outputs:
+      include_single: true
+      include_individual: true
+      manifest: true
+      formats:
+        - zip
+        - tgz
   core_certs:
     certs:
       - root: pems/gts-r1.pem
4 cmd/cert-bundler/testdata/pkg/bundle.sha256 (vendored)
@@ -1,4 +0,0 @@
5ed8bf9ed693045faa8a5cb0edc4a870052e56aef6291ce8b1604565affbc2a4 core_certs.zip
e59eddc590d2f7b790a87c5b56e81697088ab54be382c0e2c51b82034006d308 core_certs.tgz
51b9b63b1335118079e90700a3a5b847c363808e9116e576ca84f301bc433289 google_certs.tgz
3d1910ca8835c3ded1755a8c7d6c48083c2f3ff68b2bfbf932aaf27e29d0a232 lets_encrypt.zip

BIN cmd/cert-bundler/testdata/pkg/core_certs.tgz (vendored, binary file not shown)
BIN cmd/cert-bundler/testdata/pkg/core_certs.zip (vendored, binary file not shown)
BIN cmd/cert-bundler/testdata/pkg/google_certs.tgz (vendored, binary file not shown)
BIN cmd/cert-bundler/testdata/pkg/lets_encrypt.zip (vendored, binary file not shown)
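bundle.sha256 and the per-archive MANIFEST both use the "hex-encoded SHA256, then filename" line format produced by the generateHashFile and generateManifest helpers shown earlier. A small standalone checker for such a file, using only the standard library (the file name is a placeholder):

``` go
package main

import (
	"bufio"
	"crypto/sha256"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("bundle.sha256")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) != 2 {
			continue
		}
		want, name := fields[0], fields[1]
		data, err := os.ReadFile(name)
		if err != nil {
			fmt.Printf("%s: MISSING (%v)\n", name, err)
			continue
		}
		if fmt.Sprintf("%x", sha256.Sum256(data)) == want {
			fmt.Printf("%s: OK\n", name)
		} else {
			fmt.Printf("%s: MISMATCH\n", name)
		}
	}
}
```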
@@ -1,20 +1,21 @@
 package main
 
 import (
+	"context"
 	"crypto/tls"
 	"crypto/x509"
-	"flag"
 	"errors"
+	"flag"
 	"fmt"
-	"io/ioutil"
-	"net"
 	"os"
+	"strings"
 	"time"
 
 	"git.wntrmute.dev/kyle/goutils/certlib"
 	hosts "git.wntrmute.dev/kyle/goutils/certlib/hosts"
 	"git.wntrmute.dev/kyle/goutils/certlib/revoke"
 	"git.wntrmute.dev/kyle/goutils/fileutil"
+	"git.wntrmute.dev/kyle/goutils/lib"
 )
 
 var (
@@ -23,6 +24,13 @@ var (
 	verbose bool
 )
 
+var (
+	strOK      = "OK"
+	strExpired = "EXPIRED"
+	strRevoked = "REVOKED"
+	strUnknown = "UNKNOWN"
+)
+
 func main() {
 	flag.BoolVar(&hardfail, "hardfail", false, "treat revocation check failures as fatal")
 	flag.DurationVar(&timeout, "timeout", 10*time.Second, "network timeout for OCSP/CRL fetches and TLS site connects")
@@ -30,8 +38,10 @@ func main() {
 	flag.Parse()
 
 	revoke.HardFail = hardfail
-	// Set HTTP client timeout for revocation library
-	revoke.HTTPClient.Timeout = timeout
+	// Build a proxy-aware HTTP client for OCSP/CRL fetches
+	if httpClient, err := lib.NewHTTPClient(lib.DialerOpts{Timeout: timeout}); err == nil {
+		revoke.HTTPClient = httpClient
+	}
 
 	if flag.NArg() == 0 {
 		fmt.Fprintf(os.Stderr, "Usage: %s [options] <target> [<target>...]\n", os.Args[0])
@@ -42,16 +52,16 @@ func main() {
 	for _, target := range flag.Args() {
 		status, err := processTarget(target)
 		switch status {
-		case "OK":
-			fmt.Printf("%s: OK\n", target)
-		case "EXPIRED":
-			fmt.Printf("%s: EXPIRED: %v\n", target, err)
+		case strOK:
+			fmt.Printf("%s: %s\n", target, strOK)
+		case strExpired:
+			fmt.Printf("%s: %s: %v\n", target, strExpired, err)
 			exitCode = 1
-		case "REVOKED":
-			fmt.Printf("%s: REVOKED\n", target)
+		case strRevoked:
+			fmt.Printf("%s: %s\n", target, strRevoked)
 			exitCode = 1
-		case "UNKNOWN":
-			fmt.Printf("%s: UNKNOWN: %v\n", target, err)
+		case strUnknown:
+			fmt.Printf("%s: %s: %v\n", target, strUnknown, err)
 			if hardfail {
 				// In hardfail, treat unknown as failure
 				exitCode = 1
@@ -67,74 +77,68 @@ func processTarget(target string) (string, error) {
 		return checkFile(target)
 	}
 
-	// Not a file; treat as site
 	return checkSite(target)
 }
 
 func checkFile(path string) (string, error) {
-	in, err := ioutil.ReadFile(path)
-	if err != nil {
-		return "UNKNOWN", err
-	}
-
-	// Try PEM first; if that fails, try single DER cert
-	certs, err := certlib.ReadCertificates(in)
-	if err != nil || len(certs) == 0 {
-		cert, _, derr := certlib.ReadCertificate(in)
-		if derr != nil || cert == nil {
-			if err == nil {
-				err = derr
-			}
-			return "UNKNOWN", err
-		}
-		return evaluateCert(cert)
-	}
-
-	// Evaluate the first certificate (leaf) by default
-	return evaluateCert(certs[0])
+	// Prefer high-level helpers from certlib to load certificates from disk
+	if certs, err := certlib.LoadCertificates(path); err == nil && len(certs) > 0 {
+		// Evaluate the first certificate (leaf) by default
+		return evaluateCert(certs[0])
+	}
+
+	cert, err := certlib.LoadCertificate(path)
+	if err != nil || cert == nil {
+		return strUnknown, err
+	}
+	return evaluateCert(cert)
 }
 
 func checkSite(hostport string) (string, error) {
 	// Use certlib/hosts to parse host/port (supports https URLs and host:port)
 	target, err := hosts.ParseHost(hostport)
 	if err != nil {
-		return "UNKNOWN", err
+		return strUnknown, err
 	}
 
-	d := &net.Dialer{Timeout: timeout}
-	conn, err := tls.DialWithDialer(d, "tcp", target.String(), &tls.Config{InsecureSkipVerify: true, ServerName: target.Host})
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	// Use proxy-aware TLS dialer
+	conn, err := lib.DialTLS(ctx, target.String(), lib.DialerOpts{Timeout: timeout, TLSConfig: &tls.Config{
+		InsecureSkipVerify: true, // #nosec G402 -- CLI tool only verifies revocation
+		ServerName:         target.Host,
+	}})
 	if err != nil {
-		return "UNKNOWN", err
+		return strUnknown, err
 	}
 	defer conn.Close()
 
 	state := conn.ConnectionState()
 	if len(state.PeerCertificates) == 0 {
-		return "UNKNOWN", errors.New("no peer certificates presented")
+		return strUnknown, errors.New("no peer certificates presented")
 	}
 	return evaluateCert(state.PeerCertificates[0])
 }
 
 func evaluateCert(cert *x509.Certificate) (string, error) {
-	// Expiry check
-	now := time.Now()
-	if !now.Before(cert.NotAfter) {
-		return "EXPIRED", fmt.Errorf("expired at %s", cert.NotAfter)
-	}
-	if !now.After(cert.NotBefore) {
-		return "EXPIRED", fmt.Errorf("not valid until %s", cert.NotBefore)
-	}
-
-	// Revocation check using certlib/revoke
+	// Delegate validity and revocation checks to certlib/revoke helper.
+	// It returns revoked=true for both revoked and expired/not-yet-valid.
+	// Map those cases back to our statuses using the returned error text.
 	revoked, ok, err := revoke.VerifyCertificateError(cert)
 	if revoked {
-		// If revoked is true, ok will be true per implementation, err may describe why
-		return "REVOKED", err
+		if err != nil {
+			msg := err.Error()
+			if strings.Contains(msg, "expired") || strings.Contains(msg, "isn't valid until") ||
+				strings.Contains(msg, "not valid until") {
+				return strExpired, err
+			}
+		}
+		return strRevoked, err
 	}
 	if !ok {
 		// Revocation status could not be determined
-		return "UNKNOWN", err
+		return strUnknown, err
 	}
 
-	return "OK", nil
+	return strOK, nil
 }
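The rewritten evaluateCert infers EXPIRED versus REVOKED from the error text returned by revoke.VerifyCertificateError. A small self-contained illustration of that classification follows; the classify helper is written for this example only and is not part of the package.

``` go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// classify mirrors the string-matching logic used above: a "revoked" result
// whose error mentions a validity-period problem is reported as EXPIRED.
func classify(revoked bool, err error) string {
	if revoked {
		if err != nil {
			msg := err.Error()
			if strings.Contains(msg, "expired") || strings.Contains(msg, "isn't valid until") ||
				strings.Contains(msg, "not valid until") {
				return "EXPIRED"
			}
		}
		return "REVOKED"
	}
	return "OK"
}

func main() {
	fmt.Println(classify(true, errors.New("certificate expired 2024-01-02")))  // EXPIRED
	fmt.Println(classify(true, errors.New("certificate was revoked via CRL"))) // REVOKED
	fmt.Println(classify(false, nil))                                          // OK
}
```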
@@ -1,13 +1,17 @@
 package main
 
 import (
+	"context"
 	"crypto/tls"
 	"encoding/pem"
 	"flag"
 	"fmt"
+	"os"
 	"regexp"
+	"strings"
 
 	"git.wntrmute.dev/kyle/goutils/die"
+	"git.wntrmute.dev/kyle/goutils/lib"
 )
 
 var hasPort = regexp.MustCompile(`:\d+$`)
@@ -20,20 +24,22 @@ func main() {
 		server += ":443"
 	}
 
-	var chain string
-	conn, err := tls.Dial("tcp", server, nil)
+	// Use proxy-aware TLS dialer
+	conn, err := lib.DialTLS(context.Background(), server, lib.DialerOpts{TLSConfig: &tls.Config{}}) // #nosec G402
 	die.If(err)
 
+	defer conn.Close()
+
 	details := conn.ConnectionState()
+	var chain strings.Builder
 	for _, cert := range details.PeerCertificates {
 		p := pem.Block{
 			Type:  "CERTIFICATE",
 			Bytes: cert.Raw,
 		}
-		chain += string(pem.EncodeToMemory(&p))
+		chain.Write(pem.EncodeToMemory(&p))
 	}
 
-	fmt.Println(chain)
+	fmt.Fprintln(os.Stdout, chain.String())
 	}
 }
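Switching from string concatenation to strings.Builder avoids re-allocating the accumulated PEM text on every iteration. The pattern in isolation, standard library only:

``` go
package main

import (
	"fmt"
	"strings"
)

func main() {
	var b strings.Builder
	for i := 0; i < 3; i++ {
		// Each write appends to an internal buffer instead of copying the
		// whole string, as `s += ...` would.
		fmt.Fprintf(&b, "-----BEGIN BLOCK %d-----\n", i)
	}
	fmt.Print(b.String())
}
```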
@@ -1,328 +0,0 @@
package main

import (
	"bytes"
	"crypto/dsa"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"flag"
	"fmt"
	"io"
	"os"
	"sort"
	"strings"

	"git.wntrmute.dev/kyle/goutils/certlib"
	"git.wntrmute.dev/kyle/goutils/lib"
)

func certPublic(cert *x509.Certificate) string {
	switch pub := cert.PublicKey.(type) {
	case *rsa.PublicKey:
		return fmt.Sprintf("RSA-%d", pub.N.BitLen())
	case *ecdsa.PublicKey:
		switch pub.Curve {
		case elliptic.P256():
			return "ECDSA-prime256v1"
		case elliptic.P384():
			return "ECDSA-secp384r1"
		case elliptic.P521():
			return "ECDSA-secp521r1"
		default:
			return "ECDSA (unknown curve)"
		}
	case *dsa.PublicKey:
		return "DSA"
	default:
		return "Unknown"
	}
}

func displayName(name pkix.Name) string {
	var ns []string

	if name.CommonName != "" {
		ns = append(ns, name.CommonName)
	}

	for i := range name.Country {
		ns = append(ns, fmt.Sprintf("C=%s", name.Country[i]))
	}

	for i := range name.Organization {
		ns = append(ns, fmt.Sprintf("O=%s", name.Organization[i]))
	}

	for i := range name.OrganizationalUnit {
		ns = append(ns, fmt.Sprintf("OU=%s", name.OrganizationalUnit[i]))
	}

	for i := range name.Locality {
		ns = append(ns, fmt.Sprintf("L=%s", name.Locality[i]))
	}

	for i := range name.Province {
		ns = append(ns, fmt.Sprintf("ST=%s", name.Province[i]))
	}

	if len(ns) > 0 {
		return "/" + strings.Join(ns, "/")
	}

	return "*** no subject information ***"
}

func keyUsages(ku x509.KeyUsage) string {
	var uses []string

	for u, s := range keyUsage {
		if (ku & u) != 0 {
			uses = append(uses, s)
		}
	}
	sort.Strings(uses)

	return strings.Join(uses, ", ")
}

func extUsage(ext []x509.ExtKeyUsage) string {
	ns := make([]string, 0, len(ext))
	for i := range ext {
		ns = append(ns, extKeyUsages[ext[i]])
	}
	sort.Strings(ns)

	return strings.Join(ns, ", ")
}

func showBasicConstraints(cert *x509.Certificate) {
	fmt.Printf("\tBasic constraints: ")
	if cert.BasicConstraintsValid {
		fmt.Printf("valid")
	} else {
		fmt.Printf("invalid")
	}

	if cert.IsCA {
		fmt.Printf(", is a CA certificate")
		if !cert.BasicConstraintsValid {
			fmt.Printf(" (basic constraint failure)")
		}
	} else {
		fmt.Printf("is not a CA certificate")
		if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
			fmt.Printf(" (key encipherment usage enabled!)")
		}
	}

	if (cert.MaxPathLen == 0 && cert.MaxPathLenZero) || (cert.MaxPathLen > 0) {
		fmt.Printf(", max path length %d", cert.MaxPathLen)
	}

	fmt.Printf("\n")
}

const oneTrueDateFormat = "2006-01-02T15:04:05-0700"

var (
	dateFormat string
	showHash   bool // if true, print a SHA256 hash of the certificate's Raw field
)

func wrapPrint(text string, indent int) {
	tabs := ""
	for i := 0; i < indent; i++ {
		tabs += "\t"
	}

	fmt.Printf(tabs+"%s\n", wrap(text, indent))
}

func displayCert(cert *x509.Certificate) {
	fmt.Println("CERTIFICATE")
	if showHash {
		fmt.Println(wrap(fmt.Sprintf("SHA256: %x", sha256.Sum256(cert.Raw)), 0))
	}
	fmt.Println(wrap("Subject: "+displayName(cert.Subject), 0))
	fmt.Println(wrap("Issuer: "+displayName(cert.Issuer), 0))
	fmt.Printf("\tSignature algorithm: %s / %s\n", sigAlgoPK(cert.SignatureAlgorithm),
		sigAlgoHash(cert.SignatureAlgorithm))
	fmt.Println("Details:")
	wrapPrint("Public key: "+certPublic(cert), 1)
	fmt.Printf("\tSerial number: %s\n", cert.SerialNumber)

	if len(cert.AuthorityKeyId) > 0 {
		fmt.Printf("\t%s\n", wrap("AKI: "+dumpHex(cert.AuthorityKeyId), 1))
	}
	if len(cert.SubjectKeyId) > 0 {
		fmt.Printf("\t%s\n", wrap("SKI: "+dumpHex(cert.SubjectKeyId), 1))
	}

	wrapPrint("Valid from: "+cert.NotBefore.Format(dateFormat), 1)
	fmt.Printf("\t until: %s\n", cert.NotAfter.Format(dateFormat))
	fmt.Printf("\tKey usages: %s\n", keyUsages(cert.KeyUsage))

	if len(cert.ExtKeyUsage) > 0 {
		fmt.Printf("\tExtended usages: %s\n", extUsage(cert.ExtKeyUsage))
	}

	showBasicConstraints(cert)

	validNames := make([]string, 0, len(cert.DNSNames)+len(cert.EmailAddresses)+len(cert.IPAddresses))
	for i := range cert.DNSNames {
		validNames = append(validNames, "dns:"+cert.DNSNames[i])
	}

	for i := range cert.EmailAddresses {
		validNames = append(validNames, "email:"+cert.EmailAddresses[i])
	}

	for i := range cert.IPAddresses {
		validNames = append(validNames, "ip:"+cert.IPAddresses[i].String())
	}

	sans := fmt.Sprintf("SANs (%d): %s\n", len(validNames), strings.Join(validNames, ", "))
	wrapPrint(sans, 1)

	l := len(cert.IssuingCertificateURL)
	if l != 0 {
		var aia string
		if l == 1 {
			aia = "AIA"
		} else {
			aia = "AIAs"
		}
		wrapPrint(fmt.Sprintf("%d %s:", l, aia), 1)
		for _, url := range cert.IssuingCertificateURL {
			wrapPrint(url, 2)
		}
	}

	l = len(cert.OCSPServer)
	if l > 0 {
		title := "OCSP server"
		if l > 1 {
			title += "s"
		}
		wrapPrint(title+":\n", 1)
		for _, ocspServer := range cert.OCSPServer {
			wrapPrint(fmt.Sprintf("- %s\n", ocspServer), 2)
		}
	}
}

func displayAllCerts(in []byte, leafOnly bool) {
	certs, err := certlib.ParseCertificatesPEM(in)
	if err != nil {
		certs, _, err = certlib.ParseCertificatesDER(in, "")
		if err != nil {
			lib.Warn(err, "failed to parse certificates")
			return
		}
	}

	if len(certs) == 0 {
		lib.Warnx("no certificates found")
		return
	}

	if leafOnly {
		displayCert(certs[0])
		return
	}

	for i := range certs {
		displayCert(certs[i])
	}
}

func displayAllCertsWeb(uri string, leafOnly bool) {
	ci := getConnInfo(uri)
	conn, err := tls.Dial("tcp", ci.Addr, permissiveConfig())
	if err != nil {
		lib.Warn(err, "couldn't connect to %s", ci.Addr)
		return
	}
	defer conn.Close()

	state := conn.ConnectionState()
	conn.Close()

	conn, err = tls.Dial("tcp", ci.Addr, verifyConfig(ci.Host))
	if err == nil {
		err = conn.VerifyHostname(ci.Host)
		if err == nil {
			state = conn.ConnectionState()
		}
		conn.Close()
	} else {
		lib.Warn(err, "TLS verification error with server name %s", ci.Host)
	}

	if len(state.PeerCertificates) == 0 {
		lib.Warnx("no certificates found")
		return
	}

	if leafOnly {
		displayCert(state.PeerCertificates[0])
		return
	}

	if len(state.VerifiedChains) == 0 {
		lib.Warnx("no verified chains found; using peer chain")
		for i := range state.PeerCertificates {
			displayCert(state.PeerCertificates[i])
		}
	} else {
		fmt.Println("TLS chain verified successfully.")
		for i := range state.VerifiedChains {
			fmt.Printf("--- Verified certificate chain %d ---\n", i+1)
			for j := range state.VerifiedChains[i] {
				displayCert(state.VerifiedChains[i][j])
			}
		}
	}
}

func main() {
	var leafOnly bool
	flag.BoolVar(&showHash, "d", false, "show hashes of raw DER contents")
	flag.StringVar(&dateFormat, "s", oneTrueDateFormat, "date `format` in Go time format")
	flag.BoolVar(&leafOnly, "l", false, "only show the leaf certificate")
	flag.Parse()

	if flag.NArg() == 0 || (flag.NArg() == 1 && flag.Arg(0) == "-") {
		certs, err := io.ReadAll(os.Stdin)
		if err != nil {
			lib.Warn(err, "couldn't read certificates from standard input")
			os.Exit(1)
		}

		// This is needed for getting certs from JSON/jq.
		certs = bytes.TrimSpace(certs)
		certs = bytes.Replace(certs, []byte(`\n`), []byte{0xa}, -1)
		certs = bytes.Trim(certs, `"`)
		displayAllCerts(certs, leafOnly)
	} else {
		for _, filename := range flag.Args() {
			fmt.Printf("--%s ---\n", filename)
			if strings.HasPrefix(filename, "https://") {
				displayAllCertsWeb(filename, leafOnly)
			} else {
				in, err := os.ReadFile(filename)
				if err != nil {
					lib.Warn(err, "couldn't read certificate")
					continue
				}

				displayAllCerts(in, leafOnly)
			}
		}
	}
}
45 cmd/certdump/main.go (Normal file)
@@ -0,0 +1,45 @@
//lint:file-ignore SA1019 allow strict compatibility for old certs
package main

import (
	"crypto/tls"
	"flag"
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/dump"
	"git.wntrmute.dev/kyle/goutils/lib"
)

var config struct {
	showHash   bool
	dateFormat string
	leafOnly   bool
}

func main() {
	flag.BoolVar(&config.showHash, "d", false, "show hashes of raw DER contents")
	flag.StringVar(&config.dateFormat, "s", lib.OneTrueDateFormat, "date `format` in Go time format")
	flag.BoolVar(&config.leafOnly, "l", false, "only show the leaf certificate")
	flag.Parse()

	tlsCfg := &tls.Config{InsecureSkipVerify: true} // #nosec G402 - tool intentionally inspects broken TLS

	for _, filename := range flag.Args() {
		fmt.Fprintf(os.Stdout, "--%s ---%s", filename, "\n")
		certs, err := lib.GetCertificateChain(filename, tlsCfg)
		if err != nil {
			lib.Warn(err, "couldn't read certificate")
			continue
		}

		if config.leafOnly {
			dump.DisplayCert(os.Stdout, certs[0])
			continue
		}

		for i := range certs {
			dump.DisplayCert(os.Stdout, certs[i])
		}
	}
}
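The rewritten certdump pushes parsing and display into certlib/dump and lib; only the call sites appear in this diff, so the signatures below are inferred rather than confirmed. Under that assumption, reusing the same helpers from another tool would look roughly like this (the file name is a placeholder, and https:// URLs are presumably accepted as in the old implementation):

``` go
package main

import (
	"crypto/tls"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/dump"
	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	// "example.pem" is illustrative; GetCertificateChain appears to accept a
	// local file and, per the removed code path, likely a remote target too.
	certs, err := lib.GetCertificateChain("example.pem", &tls.Config{})
	if err != nil {
		lib.Warn(err, "couldn't read certificate")
		os.Exit(1)
	}
	for i := range certs {
		dump.DisplayCert(os.Stdout, certs[i])
	}
}
```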
@@ -1,176 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/kr/text"
|
|
||||||
)
|
|
||||||
|
|
||||||
// following two lifted from CFSSL, (replace-regexp "\(.+\): \(.+\),"
|
|
||||||
// "\2: \1,")
|
|
||||||
|
|
||||||
var keyUsage = map[x509.KeyUsage]string{
|
|
||||||
x509.KeyUsageDigitalSignature: "digital signature",
|
|
||||||
x509.KeyUsageContentCommitment: "content committment",
|
|
||||||
x509.KeyUsageKeyEncipherment: "key encipherment",
|
|
||||||
x509.KeyUsageKeyAgreement: "key agreement",
|
|
||||||
x509.KeyUsageDataEncipherment: "data encipherment",
|
|
||||||
x509.KeyUsageCertSign: "cert sign",
|
|
||||||
x509.KeyUsageCRLSign: "crl sign",
|
|
||||||
x509.KeyUsageEncipherOnly: "encipher only",
|
|
||||||
x509.KeyUsageDecipherOnly: "decipher only",
|
|
||||||
}
|
|
||||||
|
|
||||||
var extKeyUsages = map[x509.ExtKeyUsage]string{
|
|
||||||
x509.ExtKeyUsageAny: "any",
|
|
||||||
x509.ExtKeyUsageServerAuth: "server auth",
|
|
||||||
x509.ExtKeyUsageClientAuth: "client auth",
|
|
||||||
x509.ExtKeyUsageCodeSigning: "code signing",
|
|
||||||
x509.ExtKeyUsageEmailProtection: "s/mime",
|
|
||||||
x509.ExtKeyUsageIPSECEndSystem: "ipsec end system",
|
|
||||||
x509.ExtKeyUsageIPSECTunnel: "ipsec tunnel",
|
|
||||||
x509.ExtKeyUsageIPSECUser: "ipsec user",
|
|
||||||
x509.ExtKeyUsageTimeStamping: "timestamping",
|
|
||||||
x509.ExtKeyUsageOCSPSigning: "ocsp signing",
|
|
||||||
x509.ExtKeyUsageMicrosoftServerGatedCrypto: "microsoft sgc",
|
|
||||||
x509.ExtKeyUsageNetscapeServerGatedCrypto: "netscape sgc",
|
|
||||||
}
|
|
||||||
|
|
||||||
func pubKeyAlgo(a x509.PublicKeyAlgorithm) string {
|
|
||||||
switch a {
|
|
||||||
case x509.RSA:
|
|
||||||
return "RSA"
|
|
||||||
case x509.ECDSA:
|
|
||||||
return "ECDSA"
|
|
||||||
case x509.DSA:
|
|
||||||
return "DSA"
|
|
||||||
default:
|
|
||||||
return "unknown public key algorithm"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sigAlgoPK(a x509.SignatureAlgorithm) string {
|
|
||||||
switch a {
|
|
||||||
|
|
||||||
case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
|
|
||||||
return "RSA"
|
|
||||||
case x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
|
|
||||||
return "ECDSA"
|
|
||||||
case x509.DSAWithSHA1, x509.DSAWithSHA256:
|
|
||||||
return "DSA"
|
|
||||||
default:
|
|
||||||
return "unknown public key algorithm"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sigAlgoHash(a x509.SignatureAlgorithm) string {
|
|
||||||
switch a {
|
|
||||||
case x509.MD2WithRSA:
|
|
||||||
return "MD2"
|
|
||||||
case x509.MD5WithRSA:
|
|
||||||
return "MD5"
|
|
||||||
case x509.SHA1WithRSA, x509.ECDSAWithSHA1, x509.DSAWithSHA1:
|
|
||||||
return "SHA1"
|
|
||||||
case x509.SHA256WithRSA, x509.ECDSAWithSHA256, x509.DSAWithSHA256:
|
|
||||||
return "SHA256"
|
|
||||||
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
|
|
||||||
return "SHA384"
|
|
||||||
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
|
|
||||||
return "SHA512"
|
|
||||||
default:
|
|
||||||
return "unknown hash algorithm"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const maxLine = 78
|
|
||||||
|
|
||||||
func makeIndent(n int) string {
|
|
||||||
s := " "
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
s += " "
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func indentLen(n int) int {
|
|
||||||
return 4 + (8 * n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// this isn't real efficient, but that's not a problem here
|
|
||||||
func wrap(s string, indent int) string {
|
|
||||||
if indent > 3 {
|
|
||||||
indent = 3
|
|
||||||
}
|
|
||||||
|
|
||||||
wrapped := text.Wrap(s, maxLine)
|
|
||||||
lines := strings.SplitN(wrapped, "\n", 2)
|
|
||||||
if len(lines) == 1 {
|
|
||||||
return lines[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
if (maxLine - indentLen(indent)) <= 0 {
|
|
||||||
panic("too much indentation")
|
|
||||||
}
|
|
||||||
|
|
||||||
rest := strings.Join(lines[1:], " ")
|
|
||||||
wrapped = text.Wrap(rest, maxLine-indentLen(indent))
|
|
||||||
return lines[0] + "\n" + text.Indent(wrapped, makeIndent(indent))
|
|
||||||
}
|
|
||||||
|
|
||||||
func dumpHex(in []byte) string {
|
|
||||||
var s string
|
|
||||||
for i := range in {
|
|
||||||
s += fmt.Sprintf("%02X:", in[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.Trim(s, ":")
|
|
||||||
}
|
|
||||||
|
|
||||||
// permissiveConfig returns a maximally-accepting TLS configuration;
|
|
||||||
// the purpose is to look at the cert, not verify the security properties
|
|
||||||
// of the connection.
|
|
||||||
func permissiveConfig() *tls.Config {
|
|
||||||
return &tls.Config{
|
|
||||||
InsecureSkipVerify: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifyConfig returns a config that will verify the connection.
|
|
||||||
func verifyConfig(hostname string) *tls.Config {
|
|
||||||
return &tls.Config{
|
|
||||||
ServerName: hostname,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type connInfo struct {
|
|
||||||
// The original URI provided.
|
|
||||||
URI string
|
|
||||||
|
|
||||||
// The hostname of the server.
|
|
||||||
Host string
|
|
||||||
|
|
||||||
// The port to connect on.
|
|
||||||
Port string
|
|
||||||
|
|
||||||
// The address to connect to.
|
|
||||||
Addr string
|
|
||||||
}
|
|
||||||
|
|
||||||
func getConnInfo(uri string) *connInfo {
|
|
||||||
ci := &connInfo{URI: uri}
|
|
||||||
ci.Host = uri[len("https://"):]
|
|
||||||
|
|
||||||
host, port, err := net.SplitHostPort(ci.Host)
|
|
||||||
if err != nil {
|
|
||||||
ci.Port = "443"
|
|
||||||
} else {
|
|
||||||
ci.Host = host
|
|
||||||
ci.Port = port
|
|
||||||
}
|
|
||||||
ci.Addr = net.JoinHostPort(ci.Host, ci.Port)
|
|
||||||
return ci
|
|
||||||
}
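A quick illustration of the default-port behaviour implemented by the removed getConnInfo above. This is a self-contained sketch, not part of the diff; it only mirrors the logic shown in the deleted code.

package main

import (
	"fmt"
	"net"
)

func main() {
	// Strip the scheme the same way the removed helper did.
	host := "https://example.net"[len("https://"):] // "example.net"

	h, p, err := net.SplitHostPort(host)
	if err != nil {
		// No explicit port in the URI: fall back to 443.
		h, p = host, "443"
	}
	fmt.Println(net.JoinHostPort(h, p)) // example.net:443
}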
|
|
||||||
@@ -2,99 +2,52 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"crypto/x509/pkix"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
"git.wntrmute.dev/kyle/goutils/certlib/verify"
|
||||||
"git.wntrmute.dev/kyle/goutils/die"
|
"git.wntrmute.dev/kyle/goutils/die"
|
||||||
"git.wntrmute.dev/kyle/goutils/lib"
|
"git.wntrmute.dev/kyle/goutils/lib"
|
||||||
)
|
)
|
||||||
|
|
||||||
var warnOnly bool
|
|
||||||
var leeway = 2160 * time.Hour // three months
|
|
||||||
|
|
||||||
func displayName(name pkix.Name) string {
|
|
||||||
var ns []string
|
|
||||||
|
|
||||||
if name.CommonName != "" {
|
|
||||||
ns = append(ns, name.CommonName)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range name.Country {
|
|
||||||
ns = append(ns, fmt.Sprintf("C=%s", name.Country[i]))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range name.Organization {
|
|
||||||
ns = append(ns, fmt.Sprintf("O=%s", name.Organization[i]))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range name.OrganizationalUnit {
|
|
||||||
ns = append(ns, fmt.Sprintf("OU=%s", name.OrganizationalUnit[i]))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range name.Locality {
|
|
||||||
ns = append(ns, fmt.Sprintf("L=%s", name.Locality[i]))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range name.Province {
|
|
||||||
ns = append(ns, fmt.Sprintf("ST=%s", name.Province[i]))
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(ns) > 0 {
|
|
||||||
return "/" + strings.Join(ns, "/")
|
|
||||||
}
|
|
||||||
|
|
||||||
die.With("no subject information in root")
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func expires(cert *x509.Certificate) time.Duration {
|
|
||||||
return cert.NotAfter.Sub(time.Now())
|
|
||||||
}
|
|
||||||
|
|
||||||
func inDanger(cert *x509.Certificate) bool {
|
|
||||||
return expires(cert) < leeway
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkCert(cert *x509.Certificate) {
|
|
||||||
warn := inDanger(cert)
|
|
||||||
name := displayName(cert.Subject)
|
|
||||||
name = fmt.Sprintf("%s/SN=%s", name, cert.SerialNumber)
|
|
||||||
expiry := expires(cert)
|
|
||||||
if warnOnly {
|
|
||||||
if warn {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s expires on %s (in %s)\n", name, cert.NotAfter, expiry)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Printf("%s expires on %s (in %s)\n", name, cert.NotAfter, expiry)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
var (
|
||||||
|
skipVerify bool
|
||||||
|
strictTLS bool
|
||||||
|
leeway = verify.DefaultLeeway
|
||||||
|
warnOnly bool
|
||||||
|
)
|
||||||
|
|
||||||
|
lib.StrictTLSFlag(&strictTLS)
|
||||||
|
|
||||||
|
flag.BoolVar(&skipVerify, "k", false, "skip server verification") // #nosec G402
|
||||||
flag.BoolVar(&warnOnly, "q", false, "only warn about expiring certs")
|
flag.BoolVar(&warnOnly, "q", false, "only warn about expiring certs")
|
||||||
flag.DurationVar(&leeway, "t", leeway, "warn if certificates are closer than this to expiring")
|
flag.DurationVar(&leeway, "t", leeway, "warn if certificates are closer than this to expiring")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
for _, file := range flag.Args() {
|
tlsCfg, err := lib.BaselineTLSConfig(skipVerify, strictTLS)
|
||||||
in, err := ioutil.ReadFile(file)
|
die.If(err)
|
||||||
if err != nil {
|
|
||||||
lib.Warn(err, "failed to read file")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
certs, err := certlib.ParseCertificatesPEM(in)
|
for _, file := range flag.Args() {
|
||||||
|
var certs []*x509.Certificate
|
||||||
|
|
||||||
|
certs, err = lib.GetCertificateChain(file, tlsCfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lib.Warn(err, "while parsing certificates")
|
_, _ = lib.Warn(err, "while parsing certificates")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, cert := range certs {
|
for _, cert := range certs {
|
||||||
checkCert(cert)
|
check := verify.NewCertCheck(cert, leeway)
|
||||||
|
|
||||||
|
if warnOnly {
|
||||||
|
if err = check.Err(); err != nil {
|
||||||
|
lib.Warn(err, "certificate is expiring")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Printf("%s expires on %s (in %s)\n", check.Name(),
|
||||||
|
cert.NotAfter, check.Expiry())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
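A minimal illustration of the expiry check that the old certexpiry code above implemented directly, and that verify.NewCertCheck is assumed to reproduce: a certificate is flagged when less than the leeway (2160 hours, i.e. 2160 / 24 = 90 days by default) remains before NotAfter. Sketch only; the verify package's internals are not shown in this diff.

package main

import (
	"fmt"
	"time"
)

func main() {
	leeway := 2160 * time.Hour // three months
	notAfter := time.Now().Add(30 * 24 * time.Hour)

	// Mirrors the removed inDanger(): warn when the remaining lifetime is
	// shorter than the leeway window.
	if time.Until(notAfter) < leeway {
		fmt.Println("certificate is expiring soon")
	}
}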
|
||||||
|
|||||||
59
cmd/certser/main.go
Normal file
@@ -0,0 +1,59 @@
package main

import (
	"crypto/x509"
	"flag"
	"fmt"
	"strings"

	"git.wntrmute.dev/kyle/goutils/die"
	"git.wntrmute.dev/kyle/goutils/lib"
)

const displayInt lib.HexEncodeMode = iota

func parseDisplayMode(mode string) lib.HexEncodeMode {
	mode = strings.ToLower(mode)

	if mode == "int" {
		return displayInt
	}

	return lib.ParseHexEncodeMode(mode)
}

func serialString(cert *x509.Certificate, mode lib.HexEncodeMode) string {
	if mode == displayInt {
		return cert.SerialNumber.String()
	}

	return lib.HexEncode(cert.SerialNumber.Bytes(), mode)
}

func main() {
	var skipVerify bool
	var strictTLS bool
	lib.StrictTLSFlag(&strictTLS)
	displayAs := flag.String("d", "int", "display mode (int, hex, uhex)")
	showExpiry := flag.Bool("e", false, "show expiry date")
	flag.BoolVar(&skipVerify, "k", false, "skip server verification") // #nosec G402
	flag.Parse()

	tlsCfg, err := lib.BaselineTLSConfig(skipVerify, strictTLS)
	die.If(err)

	displayMode := parseDisplayMode(*displayAs)

	for _, arg := range flag.Args() {
		var cert *x509.Certificate

		cert, err = lib.GetCertificate(arg, tlsCfg)
		die.If(err)

		fmt.Printf("%s: %s", arg, serialString(cert, displayMode))
		if *showExpiry {
			fmt.Printf(" (%s)", cert.NotAfter.Format("2006-01-02"))
		}
		fmt.Println()
	}
}
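For a sense of how the two display families in serialString differ, here is a small self-contained sketch using only the standard library. lib.HexEncode's exact formatting is not shown in this diff, so encoding/hex stands in for it here; treat the hex output as illustrative.

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

func main() {
	// A serial number whose DER bytes are de:ad:be:ef.
	serial := new(big.Int).SetBytes([]byte{0xde, 0xad, 0xbe, 0xef})

	fmt.Println(serial.String())                    // "int" mode: 3735928559
	fmt.Println(hex.EncodeToString(serial.Bytes())) // a hex-style rendering: deadbeef
}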
|
||||||
@@ -4,109 +4,90 @@ import (
|
|||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||||
"git.wntrmute.dev/kyle/goutils/certlib/revoke"
|
"git.wntrmute.dev/kyle/goutils/certlib/verify"
|
||||||
"git.wntrmute.dev/kyle/goutils/die"
|
"git.wntrmute.dev/kyle/goutils/die"
|
||||||
"git.wntrmute.dev/kyle/goutils/lib"
|
"git.wntrmute.dev/kyle/goutils/lib"
|
||||||
)
|
)
|
||||||
|
|
||||||
func printRevocation(cert *x509.Certificate) {
|
type appConfig struct {
|
||||||
remaining := time.Until(cert.NotAfter)
|
caFile, intFile string
|
||||||
fmt.Printf("certificate expires in %s.\n", lib.Duration(remaining))
|
forceIntermediateBundle bool
|
||||||
|
revexp, skipVerify, verbose bool
|
||||||
|
strictTLS bool
|
||||||
|
}
|
||||||
|
|
||||||
revoked, ok := revoke.VerifyCertificate(cert)
|
func parseFlags() appConfig {
|
||||||
if !ok {
|
var cfg appConfig
|
||||||
fmt.Fprintf(os.Stderr, "[!] the revocation check failed (failed to determine whether certificate\nwas revoked)")
|
flag.StringVar(&cfg.caFile, "ca", "", "CA certificate `bundle`")
|
||||||
return
|
flag.StringVar(&cfg.intFile, "i", "", "intermediate `bundle`")
|
||||||
|
flag.BoolVar(&cfg.forceIntermediateBundle, "f", false,
|
||||||
|
"force the use of the intermediate bundle, ignoring any intermediates bundled with certificate")
|
||||||
|
flag.BoolVar(&cfg.skipVerify, "k", false, "skip CA verification")
|
||||||
|
flag.BoolVar(&cfg.revexp, "r", false, "print revocation and expiry information")
|
||||||
|
flag.BoolVar(&cfg.verbose, "v", false, "verbose")
|
||||||
|
lib.StrictTLSFlag(&cfg.strictTLS)
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if flag.NArg() == 0 {
|
||||||
|
die.With("usage: certverify targets...")
|
||||||
}
|
}
|
||||||
|
|
||||||
if revoked {
|
return cfg
|
||||||
fmt.Fprintf(os.Stderr, "[!] the certificate has been revoked\n")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var caFile, intFile string
|
var (
|
||||||
var forceIntermediateBundle, revexp, verbose bool
|
roots, ints *x509.CertPool
|
||||||
flag.StringVar(&caFile, "ca", "", "CA certificate `bundle`")
|
err error
|
||||||
flag.StringVar(&intFile, "i", "", "intermediate `bundle`")
|
failed bool
|
||||||
flag.BoolVar(&forceIntermediateBundle, "f", false,
|
)
|
||||||
"force the use of the intermediate bundle, ignoring any intermediates bundled with certificate")
|
|
||||||
flag.BoolVar(&revexp, "r", false, "print revocation and expiry information")
|
|
||||||
flag.BoolVar(&verbose, "v", false, "verbose")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
var roots *x509.CertPool
|
cfg := parseFlags()
|
||||||
if caFile != "" {
|
|
||||||
var err error
|
opts := &verify.Opts{
|
||||||
if verbose {
|
CheckRevocation: cfg.revexp,
|
||||||
fmt.Println("[+] loading root certificates from", caFile)
|
ForceIntermediates: cfg.forceIntermediateBundle,
|
||||||
|
Verbose: cfg.verbose,
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.caFile != "" {
|
||||||
|
if cfg.verbose {
|
||||||
|
fmt.Printf("loading CA certificates from %s\n", cfg.caFile)
|
||||||
}
|
}
|
||||||
roots, err = certlib.LoadPEMCertPool(caFile)
|
|
||||||
|
roots, err = certlib.LoadPEMCertPool(cfg.caFile)
|
||||||
die.If(err)
|
die.If(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var ints *x509.CertPool
|
if cfg.intFile != "" {
|
||||||
if intFile != "" {
|
if cfg.verbose {
|
||||||
var err error
|
fmt.Printf("loading intermediate certificates from %s\n", cfg.intFile)
|
||||||
if verbose {
|
|
||||||
fmt.Println("[+] loading intermediate certificates from", intFile)
|
|
||||||
}
|
}
|
||||||
ints, err = certlib.LoadPEMCertPool(caFile)
|
|
||||||
|
ints, err = certlib.LoadPEMCertPool(cfg.intFile)
|
||||||
die.If(err)
|
die.If(err)
|
||||||
} else {
|
|
||||||
ints = x509.NewCertPool()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if flag.NArg() != 1 {
|
opts.Config, err = lib.BaselineTLSConfig(cfg.skipVerify, cfg.strictTLS)
|
||||||
fmt.Fprintf(os.Stderr, "Usage: %s [-ca bundle] [-i bundle] cert",
|
|
||||||
lib.ProgName())
|
|
||||||
}
|
|
||||||
|
|
||||||
fileData, err := ioutil.ReadFile(flag.Arg(0))
|
|
||||||
die.If(err)
|
die.If(err)
|
||||||
|
|
||||||
chain, err := certlib.ParseCertificatesPEM(fileData)
|
opts.Config.RootCAs = roots
|
||||||
die.If(err)
|
opts.Intermediates = ints
|
||||||
if verbose {
|
|
||||||
fmt.Printf("[+] %s has %d certificates\n", flag.Arg(0), len(chain))
|
|
||||||
}
|
|
||||||
|
|
||||||
cert := chain[0]
|
for _, arg := range flag.Args() {
|
||||||
if len(chain) > 1 {
|
_, err = verify.Chain(os.Stdout, arg, opts)
|
||||||
if !forceIntermediateBundle {
|
if err != nil {
|
||||||
for _, intermediate := range chain[1:] {
|
lib.Warn(err, "while verifying %s", arg)
|
||||||
if verbose {
|
failed = true
|
||||||
fmt.Printf("[+] adding intermediate with SKI %x\n", intermediate.SubjectKeyId)
|
} else {
|
||||||
}
|
fmt.Printf("%s: OK\n", arg)
|
||||||
|
|
||||||
ints.AddCert(intermediate)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
opts := x509.VerifyOptions{
|
if failed {
|
||||||
Intermediates: ints,
|
|
||||||
Roots: roots,
|
|
||||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = cert.Verify(opts)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Verification failed: %v\n", err)
|
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
if verbose {
|
|
||||||
fmt.Println("OK")
|
|
||||||
}
|
|
||||||
|
|
||||||
if revexp {
|
|
||||||
printRevocation(cert)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,8 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -56,7 +58,7 @@ var modes = ssh.TerminalModes{
|
|||||||
}
|
}
|
||||||
|
|
||||||
func sshAgent() ssh.AuthMethod {
|
func sshAgent() ssh.AuthMethod {
|
||||||
a, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
|
a, err := (&net.Dialer{}).DialContext(context.Background(), "unix", os.Getenv("SSH_AUTH_SOCK"))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return ssh.PublicKeysCallback(agent.NewClient(a).Signers)
|
return ssh.PublicKeysCallback(agent.NewClient(a).Signers)
|
||||||
}
|
}
|
||||||
@@ -82,7 +84,7 @@ func scanner(host string, in io.Reader, out io.Writer) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func logError(host string, err error, format string, args ...interface{}) {
|
func logError(host string, err error, format string, args ...any) {
|
||||||
msg := fmt.Sprintf(format, args...)
|
msg := fmt.Sprintf(format, args...)
|
||||||
log.Printf("[%s] FAILED: %s: %v\n", host, msg, err)
|
log.Printf("[%s] FAILED: %s: %v\n", host, msg, err)
|
||||||
}
|
}
|
||||||
@@ -93,7 +95,7 @@ func exec(wg *sync.WaitGroup, user, host string, commands []string) {
|
|||||||
defer func() {
|
defer func() {
|
||||||
for i := len(shutdown) - 1; i >= 0; i-- {
|
for i := len(shutdown) - 1; i >= 0; i-- {
|
||||||
err := shutdown[i]()
|
err := shutdown[i]()
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && !errors.Is(err, io.EOF) {
|
||||||
logError(host, err, "shutting down")
|
logError(host, err, "shutting down")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -115,7 +117,7 @@ func exec(wg *sync.WaitGroup, user, host string, commands []string) {
|
|||||||
}
|
}
|
||||||
shutdown = append(shutdown, session.Close)
|
shutdown = append(shutdown, session.Close)
|
||||||
|
|
||||||
if err := session.RequestPty("xterm", 80, 40, modes); err != nil {
|
if err = session.RequestPty("xterm", 80, 40, modes); err != nil {
|
||||||
session.Close()
|
session.Close()
|
||||||
logError(host, err, "request for pty failed")
|
logError(host, err, "request for pty failed")
|
||||||
return
|
return
|
||||||
@@ -150,7 +152,7 @@ func upload(wg *sync.WaitGroup, user, host, local, remote string) {
|
|||||||
defer func() {
|
defer func() {
|
||||||
for i := len(shutdown) - 1; i >= 0; i-- {
|
for i := len(shutdown) - 1; i >= 0; i-- {
|
||||||
err := shutdown[i]()
|
err := shutdown[i]()
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && !errors.Is(err, io.EOF) {
|
||||||
logError(host, err, "shutting down")
|
logError(host, err, "shutting down")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -199,7 +201,7 @@ func upload(wg *sync.WaitGroup, user, host, local, remote string) {
|
|||||||
fmt.Printf("[%s] wrote %d-byte chunk\n", host, n)
|
fmt.Printf("[%s] wrote %d-byte chunk\n", host, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == io.EOF {
|
if errors.Is(err, io.EOF) {
|
||||||
break
|
break
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
logError(host, err, "reading chunk")
|
logError(host, err, "reading chunk")
|
||||||
@@ -215,7 +217,7 @@ func download(wg *sync.WaitGroup, user, host, local, remote string) {
|
|||||||
defer func() {
|
defer func() {
|
||||||
for i := len(shutdown) - 1; i >= 0; i-- {
|
for i := len(shutdown) - 1; i >= 0; i-- {
|
||||||
err := shutdown[i]()
|
err := shutdown[i]()
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && !errors.Is(err, io.EOF) {
|
||||||
logError(host, err, "shutting down")
|
logError(host, err, "shutting down")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -265,7 +267,7 @@ func download(wg *sync.WaitGroup, user, host, local, remote string) {
|
|||||||
fmt.Printf("[%s] wrote %d-byte chunk\n", host, n)
|
fmt.Printf("[%s] wrote %d-byte chunk\n", host, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == io.EOF {
|
if errors.Is(err, io.EOF) {
|
||||||
break
|
break
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
logError(host, err, "reading chunk")
|
logError(host, err, "reading chunk")
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"git.wntrmute.dev/kyle/goutils/die"
|
"git.wntrmute.dev/kyle/goutils/die"
|
||||||
"git.wntrmute.dev/kyle/goutils/fileutil"
|
"git.wntrmute.dev/kyle/goutils/fileutil"
|
||||||
@@ -26,7 +27,7 @@ func setupFile(hdr *tar.Header, file *os.File) error {
|
|||||||
if verbose {
|
if verbose {
|
||||||
fmt.Printf("\tchmod %0#o\n", hdr.Mode)
|
fmt.Printf("\tchmod %0#o\n", hdr.Mode)
|
||||||
}
|
}
|
||||||
err := file.Chmod(os.FileMode(hdr.Mode))
|
err := file.Chmod(os.FileMode(hdr.Mode & 0xFFFFFFFF)) // #nosec G115
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -48,73 +49,105 @@ func linkTarget(target, top string) string {
|
|||||||
return target
|
return target
|
||||||
}
|
}
|
||||||
|
|
||||||
return filepath.Clean(filepath.Join(target, top))
|
return filepath.Clean(filepath.Join(top, target))
|
||||||
|
}
|
||||||
|
|
||||||
|
// safeJoin joins base and elem and ensures the resulting path does not escape base.
func safeJoin(base, elem string) (string, error) {
	cleanBase := filepath.Clean(base)
	joined := filepath.Clean(filepath.Join(cleanBase, elem))

	absBase, err := filepath.Abs(cleanBase)
	if err != nil {
		return "", err
	}
	absJoined, err := filepath.Abs(joined)
	if err != nil {
		return "", err
	}
	rel, err := filepath.Rel(absBase, absJoined)
	if err != nil {
		return "", err
	}
	if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
		return "", fmt.Errorf("path traversal detected: %s escapes %s", elem, base)
	}
	return joined, nil
}

func handleTypeReg(tfr *tar.Reader, hdr *tar.Header, filePath string) error {
	file, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer file.Close()

	if _, err = io.Copy(file, tfr); err != nil {
		return err
	}
	return setupFile(hdr, file)
}

func handleTypeLink(hdr *tar.Header, top, filePath string) error {
	file, err := os.Create(filePath)
	if err != nil {
		return err
	}
	defer file.Close()

	srcPath, err := safeJoin(top, hdr.Linkname)
	if err != nil {
		return err
	}
	source, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer source.Close()

	if _, err = io.Copy(file, source); err != nil {
		return err
	}
	return setupFile(hdr, file)
}

func handleTypeSymlink(hdr *tar.Header, top, filePath string) error {
	if !fileutil.ValidateSymlink(hdr.Linkname, top) {
		return fmt.Errorf("symlink %s is outside the top-level %s", hdr.Linkname, top)
	}
	path := linkTarget(hdr.Linkname, top)
	if ok, err := filepath.Match(top+"/*", filepath.Clean(path)); !ok {
		return fmt.Errorf("symlink %s isn't in %s", hdr.Linkname, top)
	} else if err != nil {
		return err
	}
	return os.Symlink(linkTarget(hdr.Linkname, top), filePath)
}

func handleTypeDir(hdr *tar.Header, filePath string) error {
	return os.MkdirAll(filePath, os.FileMode(hdr.Mode&0xFFFFFFFF)) // #nosec G115
|
||||||
}
|
}
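// Illustrative only (not part of the diff): how safeJoin above behaves for a
// normal archive entry versus a hostile one.
//
//	p, err := safeJoin("/tmp/extract", "docs/readme.txt")
//	// p == "/tmp/extract/docs/readme.txt", err == nil
//
//	_, err = safeJoin("/tmp/extract", "../../etc/passwd")
//	// err != nil: "path traversal detected: ../../etc/passwd escapes /tmp/extract"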
|
||||||
|
|
||||||
func processFile(tfr *tar.Reader, hdr *tar.Header, top string) error {
|
func processFile(tfr *tar.Reader, hdr *tar.Header, top string) error {
|
||||||
if verbose {
|
if verbose {
|
||||||
fmt.Println(hdr.Name)
|
fmt.Println(hdr.Name)
|
||||||
}
|
}
|
||||||
filePath := filepath.Clean(filepath.Join(top, hdr.Name))
|
|
||||||
switch hdr.Typeflag {
|
|
||||||
case tar.TypeReg:
|
|
||||||
file, err := os.Create(filePath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = io.Copy(file, tfr)
|
filePath, err := safeJoin(top, hdr.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
|
||||||
|
|
||||||
err = setupFile(hdr, file)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case tar.TypeLink:
|
|
||||||
file, err := os.Create(filePath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
source, err := os.Open(hdr.Linkname)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = io.Copy(file, source)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = setupFile(hdr, file)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case tar.TypeSymlink:
|
|
||||||
if !fileutil.ValidateSymlink(hdr.Linkname, top) {
|
|
||||||
return fmt.Errorf("symlink %s is outside the top-level %s",
|
|
||||||
hdr.Linkname, top)
|
|
||||||
}
|
|
||||||
path := linkTarget(hdr.Linkname, top)
|
|
||||||
if ok, err := filepath.Match(top+"/*", filepath.Clean(path)); !ok {
|
|
||||||
return fmt.Errorf("symlink %s isn't in %s", hdr.Linkname, top)
|
|
||||||
} else if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err := os.Symlink(linkTarget(hdr.Linkname, top), filePath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case tar.TypeDir:
|
|
||||||
err := os.MkdirAll(filePath, os.FileMode(hdr.Mode))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
switch hdr.Typeflag {
|
||||||
|
case tar.TypeReg:
|
||||||
|
return handleTypeReg(tfr, hdr, filePath)
|
||||||
|
case tar.TypeLink:
|
||||||
|
return handleTypeLink(hdr, top, filePath)
|
||||||
|
case tar.TypeSymlink:
|
||||||
|
return handleTypeSymlink(hdr, top, filePath)
|
||||||
|
case tar.TypeDir:
|
||||||
|
return handleTypeDir(hdr, filePath)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -261,16 +294,16 @@ func main() {
|
|||||||
die.If(err)
|
die.If(err)
|
||||||
|
|
||||||
tfr := tar.NewReader(r)
|
tfr := tar.NewReader(r)
|
||||||
|
var hdr *tar.Header
|
||||||
for {
|
for {
|
||||||
hdr, err := tfr.Next()
|
hdr, err = tfr.Next()
|
||||||
if err == io.EOF {
|
if errors.Is(err, io.EOF) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
die.If(err)
|
die.If(err)
|
||||||
|
|
||||||
err = processFile(tfr, hdr, top)
|
err = processFile(tfr, hdr, top)
|
||||||
die.If(err)
|
die.If(err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
r.Close()
|
r.Close()
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ import (
|
|||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"os"
|
||||||
"log"
|
|
||||||
|
|
||||||
|
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||||
"git.wntrmute.dev/kyle/goutils/die"
|
"git.wntrmute.dev/kyle/goutils/die"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -17,17 +17,10 @@ func main() {
|
|||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
for _, fileName := range flag.Args() {
|
for _, fileName := range flag.Args() {
|
||||||
in, err := ioutil.ReadFile(fileName)
|
in, err := os.ReadFile(fileName)
|
||||||
die.If(err)
|
die.If(err)
|
||||||
|
|
||||||
if p, _ := pem.Decode(in); p != nil {
|
csr, _, err := certlib.ParseCSR(in)
|
||||||
if p.Type != "CERTIFICATE REQUEST" {
|
|
||||||
log.Fatal("INVALID FILE TYPE")
|
|
||||||
}
|
|
||||||
in = p.Bytes
|
|
||||||
}
|
|
||||||
|
|
||||||
csr, err := x509.ParseCertificateRequest(in)
|
|
||||||
die.If(err)
|
die.If(err)
|
||||||
|
|
||||||
out, err := x509.MarshalPKIXPublicKey(csr.PublicKey)
|
out, err := x509.MarshalPKIXPublicKey(csr.PublicKey)
|
||||||
@@ -48,8 +41,8 @@ func main() {
|
|||||||
Bytes: out,
|
Bytes: out,
|
||||||
}
|
}
|
||||||
|
|
||||||
err = ioutil.WriteFile(fileName+".pub", pem.EncodeToMemory(p), 0644)
|
err = os.WriteFile(fileName+".pub", pem.EncodeToMemory(p), 0o644) // #nosec G306
|
||||||
die.If(err)
|
die.If(err)
|
||||||
fmt.Printf("[+] wrote %s.\n", fileName+".pub")
|
fmt.Fprintf(os.Stdout, "[+] wrote %s.\n", fileName+".pub")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -152,7 +153,7 @@ func rsync(syncDir, target, excludeFile string, verboseRsync bool) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := exec.Command(path, args...)
|
cmd := exec.CommandContext(context.Background(), path, args...)
|
||||||
cmd.Stdout = os.Stdout
|
cmd.Stdout = os.Stdout
|
||||||
cmd.Stderr = os.Stderr
|
cmd.Stderr = os.Stderr
|
||||||
return cmd.Run()
|
return cmd.Run()
|
||||||
@@ -163,7 +164,6 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
||||||
var logLevel, mountDir, syncDir, target string
|
var logLevel, mountDir, syncDir, target string
|
||||||
var dryRun, quietMode, noSyslog, verboseRsync bool
|
var dryRun, quietMode, noSyslog, verboseRsync bool
|
||||||
|
|
||||||
@@ -219,7 +219,7 @@ func main() {
|
|||||||
if excludeFile != "" {
|
if excludeFile != "" {
|
||||||
defer func() {
|
defer func() {
|
||||||
log.Infof("removing exclude file %s", excludeFile)
|
log.Infof("removing exclude file %s", excludeFile)
|
||||||
if err := os.Remove(excludeFile); err != nil {
|
if rmErr := os.Remove(excludeFile); rmErr != nil {
|
||||||
log.Warningf("failed to remove temp file %s", excludeFile)
|
log.Warningf("failed to remove temp file %s", excludeFile)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|||||||
@@ -15,43 +15,41 @@ import (
|
|||||||
const defaultHashAlgorithm = "sha256"
|
const defaultHashAlgorithm = "sha256"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
hAlgo string
|
hAlgo string
|
||||||
debug = dbg.New()
|
debug = dbg.New()
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func openImage(imageFile string) (*os.File, []byte, error) {
|
||||||
func openImage(imageFile string) (image *os.File, hash []byte, err error) {
|
f, err := os.Open(imageFile)
|
||||||
image, err = os.Open(imageFile)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
hash, err = ahash.SumReader(hAlgo, image)
|
h, err := ahash.SumReader(hAlgo, f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = image.Seek(0, 0)
|
if _, err = f.Seek(0, 0); err != nil {
|
||||||
if err != nil {
|
return nil, nil, err
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
debug.Printf("%s %x\n", imageFile, hash)
|
debug.Printf("%s %x\n", imageFile, h)
|
||||||
return
|
return f, h, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func openDevice(devicePath string) (device *os.File, err error) {
|
func openDevice(devicePath string) (*os.File, error) {
|
||||||
fi, err := os.Stat(devicePath)
|
fi, err := os.Stat(devicePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
device, err = os.OpenFile(devicePath, os.O_RDWR|os.O_SYNC, fi.Mode())
|
device, err := os.OpenFile(devicePath, os.O_RDWR|os.O_SYNC, fi.Mode())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return device, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
@@ -105,12 +103,12 @@ func main() {
|
|||||||
die.If(err)
|
die.If(err)
|
||||||
|
|
||||||
if !bytes.Equal(deviceHash, hash) {
|
if !bytes.Equal(deviceHash, hash) {
|
||||||
fmt.Fprintln(os.Stderr, "Hash mismatch:")
|
buf := &bytes.Buffer{}
|
||||||
fmt.Fprintf(os.Stderr, "\t%s: %s\n", imageFile, hash)
|
fmt.Fprintln(buf, "Hash mismatch:")
|
||||||
fmt.Fprintf(os.Stderr, "\t%s: %s\n", devicePath, deviceHash)
|
fmt.Fprintf(buf, "\t%s: %s\n", imageFile, hash)
|
||||||
os.Exit(1)
|
fmt.Fprintf(buf, "\t%s: %s\n", devicePath, deviceHash)
|
||||||
|
die.With(buf.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
debug.Println("OK")
|
debug.Println("OK")
|
||||||
os.Exit(0)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,30 +1,33 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"git.wntrmute.dev/kyle/goutils/die"
|
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.wntrmute.dev/kyle/goutils/die"
|
||||||
)
|
)
|
||||||
|
|
||||||
func usage(w io.Writer, exc int) {
|
func usage(w io.Writer, exc int) {
|
||||||
fmt.Fprintln(w, `usage: dumpbytes <file>`)
|
fmt.Fprintln(w, `usage: dumpbytes -n tabs <file>`)
|
||||||
os.Exit(exc)
|
os.Exit(exc)
|
||||||
}
|
}
|
||||||
|
|
||||||
func printBytes(buf []byte) {
|
func printBytes(buf []byte) {
|
||||||
fmt.Printf("\t")
|
fmt.Printf("\t")
|
||||||
for i := 0; i < len(buf); i++ {
|
for i := range buf {
|
||||||
fmt.Printf("0x%02x, ", buf[i])
|
fmt.Printf("0x%02x, ", buf[i])
|
||||||
}
|
}
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
}
|
}
|
||||||
|
|
||||||
func dumpFile(path string, indentLevel int) error {
|
func dumpFile(path string, indentLevel int) error {
|
||||||
indent := ""
|
var indent strings.Builder
|
||||||
for i := 0; i < indentLevel; i++ {
|
for range indentLevel {
|
||||||
indent += "\t"
|
indent.WriteByte('\t')
|
||||||
}
|
}
|
||||||
|
|
||||||
file, err := os.Open(path)
|
file, err := os.Open(path)
|
||||||
@@ -34,13 +37,14 @@ func dumpFile(path string, indentLevel int) error {
|
|||||||
|
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
fmt.Printf("%svar buffer = []byte{\n", indent)
|
fmt.Printf("%svar buffer = []byte{\n", indent.String())
|
||||||
|
var n int
|
||||||
for {
|
for {
|
||||||
buf := make([]byte, 8)
|
buf := make([]byte, 8)
|
||||||
n, err := file.Read(buf)
|
n, err = file.Read(buf)
|
||||||
if err == io.EOF {
|
if errors.Is(err, io.EOF) {
|
||||||
if n > 0 {
|
if n > 0 {
|
||||||
fmt.Printf("%s", indent)
|
fmt.Printf("%s", indent.String())
|
||||||
printBytes(buf[:n])
|
printBytes(buf[:n])
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
@@ -50,11 +54,11 @@ func dumpFile(path string, indentLevel int) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("%s", indent)
|
fmt.Printf("%s", indent.String())
|
||||||
printBytes(buf[:n])
|
printBytes(buf[:n])
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("%s}\n", indent)
|
fmt.Printf("%s}\n", indent.String())
|
||||||
return nil
|
return nil
|
||||||
}
|
}
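// Example (illustrative): for a three-byte file containing 0x01 0x02 0x03 and
// an indent level of zero, dumpFile above emits:
//
//	var buffer = []byte{
//		0x01, 0x02, 0x03,
//	}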
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import (
|
|||||||
"git.wntrmute.dev/kyle/goutils/die"
|
"git.wntrmute.dev/kyle/goutils/die"
|
||||||
)
|
)
|
||||||
|
|
||||||
// size of a kilobit in bytes
|
// size of a kilobit in bytes.
|
||||||
const kilobit = 128
|
const kilobit = 128
|
||||||
const pageSize = 4096
|
const pageSize = 4096
|
||||||
|
|
||||||
@@ -26,10 +26,10 @@ func main() {
|
|||||||
path = flag.Arg(0)
|
path = flag.Arg(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
fillByte := uint8(*fill)
|
fillByte := uint8(*fill & 0xff) // #nosec G115 clearing out of bounds bits
|
||||||
|
|
||||||
buf := make([]byte, pageSize)
|
buf := make([]byte, pageSize)
|
||||||
for i := 0; i < pageSize; i++ {
|
for i := range pageSize {
|
||||||
buf[i] = fillByte
|
buf[i] = fillByte
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,7 +40,7 @@ func main() {
|
|||||||
die.If(err)
|
die.If(err)
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
for i := 0; i < pages; i++ {
|
for range pages {
|
||||||
_, err = file.Write(buf)
|
_, err = file.Write(buf)
|
||||||
die.If(err)
|
die.If(err)
|
||||||
}
|
}
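// Sizing note (a sketch; the pages calculation itself is outside this hunk and
// is assumed to come from the device size): the constants above mean one
// kilobit is 1024 bits = 128 bytes, so a 256-kilobit part holds
// 256 * 128 = 32768 bytes, which is 32768 / 4096 = 8 pages of fill data.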
|
||||||
|
|||||||
@@ -72,15 +72,13 @@ func main() {
|
|||||||
|
|
||||||
if end < start {
|
if end < start {
|
||||||
fmt.Fprintln(os.Stderr, "[!] end < start, swapping values")
|
fmt.Fprintln(os.Stderr, "[!] end < start, swapping values")
|
||||||
tmp := end
|
start, end = end, start
|
||||||
end = start
|
|
||||||
start = tmp
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var fmtStr string
|
var fmtStr string
|
||||||
|
|
||||||
if !*quiet {
|
if !*quiet {
|
||||||
maxLine := fmt.Sprintf("%d", len(lines))
|
maxLine := strconv.Itoa(len(lines))
|
||||||
fmtStr = fmt.Sprintf("%%0%dd: %%s", len(maxLine))
|
fmtStr = fmt.Sprintf("%%0%dd: %%s", len(maxLine))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -98,9 +96,9 @@ func main() {
|
|||||||
fmtStr += "\n"
|
fmtStr += "\n"
|
||||||
for i := start; !endFunc(i); i++ {
|
for i := start; !endFunc(i); i++ {
|
||||||
if *quiet {
|
if *quiet {
|
||||||
fmt.Println(lines[i])
|
fmt.Fprintln(os.Stdout, lines[i])
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf(fmtStr, i, lines[i])
|
fmt.Fprintf(os.Stdout, fmtStr, i, lines[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
@@ -8,7 +9,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func lookupHost(host string) error {
|
func lookupHost(host string) error {
|
||||||
cname, err := net.LookupCNAME(host)
|
r := &net.Resolver{}
|
||||||
|
cname, err := r.LookupCNAME(context.Background(), host)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -18,7 +20,7 @@ func lookupHost(host string) error {
|
|||||||
host = cname
|
host = cname
|
||||||
}
|
}
|
||||||
|
|
||||||
addrs, err := net.LookupHost(host)
|
addrs, err := r.LookupHost(context.Background(), host)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"git.wntrmute.dev/kyle/goutils/lib"
|
"git.wntrmute.dev/kyle/goutils/lib"
|
||||||
@@ -16,20 +16,20 @@ func prettify(file string, validateOnly bool) error {
|
|||||||
var err error
|
var err error
|
||||||
|
|
||||||
if file == "-" {
|
if file == "-" {
|
||||||
in, err = ioutil.ReadAll(os.Stdin)
|
in, err = io.ReadAll(os.Stdin)
|
||||||
} else {
|
} else {
|
||||||
in, err = ioutil.ReadFile(file)
|
in, err = os.ReadFile(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lib.Warn(err, "ReadFile")
|
_, _ = lib.Warn(err, "ReadFile")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var buf = &bytes.Buffer{}
|
var buf = &bytes.Buffer{}
|
||||||
err = json.Indent(buf, in, "", " ")
|
err = json.Indent(buf, in, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lib.Warn(err, "%s", file)
|
_, _ = lib.Warn(err, "%s", file)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -40,11 +40,11 @@ func prettify(file string, validateOnly bool) error {
|
|||||||
if file == "-" {
|
if file == "-" {
|
||||||
_, err = os.Stdout.Write(buf.Bytes())
|
_, err = os.Stdout.Write(buf.Bytes())
|
||||||
} else {
|
} else {
|
||||||
err = ioutil.WriteFile(file, buf.Bytes(), 0644)
|
err = os.WriteFile(file, buf.Bytes(), 0o644)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lib.Warn(err, "WriteFile")
|
_, _ = lib.Warn(err, "WriteFile")
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
@@ -55,20 +55,20 @@ func compact(file string, validateOnly bool) error {
|
|||||||
var err error
|
var err error
|
||||||
|
|
||||||
if file == "-" {
|
if file == "-" {
|
||||||
in, err = ioutil.ReadAll(os.Stdin)
|
in, err = io.ReadAll(os.Stdin)
|
||||||
} else {
|
} else {
|
||||||
in, err = ioutil.ReadFile(file)
|
in, err = os.ReadFile(file)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lib.Warn(err, "ReadFile")
|
_, _ = lib.Warn(err, "ReadFile")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var buf = &bytes.Buffer{}
|
var buf = &bytes.Buffer{}
|
||||||
err = json.Compact(buf, in)
|
err = json.Compact(buf, in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lib.Warn(err, "%s", file)
|
_, _ = lib.Warn(err, "%s", file)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -79,11 +79,11 @@ func compact(file string, validateOnly bool) error {
|
|||||||
if file == "-" {
|
if file == "-" {
|
||||||
_, err = os.Stdout.Write(buf.Bytes())
|
_, err = os.Stdout.Write(buf.Bytes())
|
||||||
} else {
|
} else {
|
||||||
err = ioutil.WriteFile(file, buf.Bytes(), 0644)
|
err = os.WriteFile(file, buf.Bytes(), 0o644)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lib.Warn(err, "WriteFile")
|
_, _ = lib.Warn(err, "WriteFile")
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
@@ -91,7 +91,7 @@ func compact(file string, validateOnly bool) error {
|
|||||||
|
|
||||||
func usage() {
|
func usage() {
|
||||||
progname := lib.ProgName()
|
progname := lib.ProgName()
|
||||||
fmt.Printf(`Usage: %s [-h] files...
|
fmt.Fprintf(os.Stdout, `Usage: %s [-h] files...
|
||||||
%s is used to lint and prettify (or compact) JSON files. The
|
%s is used to lint and prettify (or compact) JSON files. The
|
||||||
files will be updated in-place.
|
files will be updated in-place.
|
||||||
|
|
||||||
@@ -100,7 +100,6 @@ func usage() {
|
|||||||
-h Print this help message.
|
-h Print this help message.
|
||||||
-n Don't prettify; only perform validation.
|
-n Don't prettify; only perform validation.
|
||||||
`, progname, progname)
|
`, progname, progname)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -3,15 +3,31 @@ kgz
 kgz is like gzip, but supports compressing and decompressing to a different
 directory than the source file is in.

-Usage: kgz [-l] source [target]
+Usage: kgz [-l] [-k] [-m] [-x] [--uid N] [--gid N] source [target]

-If target is a directory, the basename of the sourcefile will be used
+If target is a directory, the basename of the source file will be used
 as the target filename. Compression and decompression is selected
 based on whether the source filename ends in ".gz".

 Flags:
-    -l level    Compression level (0-9). Only meaninful when
-                compressing a file.
+    -l level    Compression level (0-9). Only meaningful when compressing.
+    -u          Do not restrict the size during decompression. As
+                a safeguard against gzip bombs, the maximum size
+                allowed is 32 * the compressed file size.
+    -k          Keep the source file (do not remove it after successful
+                compression or decompression).
+    -m          On decompression, set the file mtime from the gzip header.
+    -x          On compression, include uid/gid/mode/ctime in the gzip Extra
+                field so that decompression can restore them. The Extra payload
+                is an ASN.1 DER-encoded struct.
+    --uid N     When used with -x, set UID in Extra to N (override source).
+    --gid N     When used with -x, set GID in Extra to N (override source).
+
+Metadata notes:
+- mtime is stored in the standard gzip header and restored with -m.
+- uid/gid/mode/ctime are stored in a kgz-specific Extra subfield as an ASN.1
+  DER-encoded struct. Restoring uid/gid may fail without sufficient privileges;
+  such errors are ignored.
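To make the metadata notes above concrete, here is a minimal, self-contained sketch of decoding such a payload with encoding/asn1. The struct mirrors the fields listed in the notes (and the KGZExtra type in cmd/kgz/main.go further down); it is an illustration, not a format specification.

package main

import (
	"encoding/asn1"
	"fmt"
)

// kgzMeta mirrors the documented ASN.1 DER payload: version, uid, gid,
// permission bits, and ctime (seconds plus nanoseconds).
type kgzMeta struct {
	Version   int
	UID       int
	GID       int
	Mode      int
	CTimeSec  int64
	CTimeNSec int32
}

func decodeMeta(payload []byte) (kgzMeta, error) {
	var m kgzMeta
	_, err := asn1.Unmarshal(payload, &m)
	return m, err
}

func main() {
	// Round-trip a sample payload to show the shape of the data.
	raw, _ := asn1.Marshal(kgzMeta{Version: 1, UID: 1000, GID: 1000, Mode: 0o644})
	m, _ := decodeMeta(raw)
	fmt.Printf("%+v\n", m)
}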
|||||||
281
cmd/kgz/main.go
@@ -3,81 +3,276 @@ package main
|
|||||||
import (
|
import (
|
||||||
"compress/flate"
|
"compress/flate"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
|
"encoding/asn1"
|
||||||
|
"encoding/binary"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
goutilslib "git.wntrmute.dev/kyle/goutils/lib"
|
||||||
)
|
)
|
||||||
|
|
||||||
const gzipExt = ".gz"
|
const gzipExt = ".gz"
|
||||||
|
|
||||||
func compress(path, target string, level int) error {
|
// kgzExtraID is the two-byte subfield identifier used in the gzip Extra field
|
||||||
|
// for kgz-specific metadata.
|
||||||
|
var kgzExtraID = [2]byte{'K', 'G'}
|
||||||
|
|
||||||
|
// buildKGExtra constructs the gzip Extra subfield payload for kgz metadata.
|
||||||
|
//
|
||||||
|
// The payload is an ASN.1 DER-encoded struct with the following fields:
|
||||||
|
//
|
||||||
|
// Version INTEGER (currently 1)
|
||||||
|
// UID INTEGER
|
||||||
|
// GID INTEGER
|
||||||
|
// Mode INTEGER (permission bits)
|
||||||
|
// CTimeSec INTEGER (seconds)
|
||||||
|
// CTimeNSec INTEGER (nanoseconds)
|
||||||
|
//
|
||||||
|
// The ASN.1 blob is wrapped in a gzip Extra subfield with ID 'K','G'.
|
||||||
|
func buildKGExtra(uid, gid, mode uint32, ctimeS int64, ctimeNs int32) []byte {
|
||||||
|
// Define the ASN.1 structure to encode
|
||||||
|
type KGZExtra struct {
|
||||||
|
Version int
|
||||||
|
UID int
|
||||||
|
GID int
|
||||||
|
Mode int
|
||||||
|
CTimeSec int64
|
||||||
|
CTimeNSec int32
|
||||||
|
}
|
||||||
|
|
||||||
|
payload, err := asn1.Marshal(KGZExtra{
|
||||||
|
Version: 1,
|
||||||
|
UID: int(uid),
|
||||||
|
GID: int(gid),
|
||||||
|
Mode: int(mode),
|
||||||
|
CTimeSec: ctimeS,
|
||||||
|
CTimeNSec: ctimeNs,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
// On marshal failure, return empty to avoid breaking compression
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wrap in gzip subfield: [ID1 ID2 LEN(lo) LEN(hi) PAYLOAD]
|
||||||
|
// Guard against payload length overflow to uint16 for the extra subfield length.
|
||||||
|
if len(payload) > int(math.MaxUint16) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
extra := make([]byte, 4+len(payload))
|
||||||
|
extra[0] = kgzExtraID[0]
|
||||||
|
extra[1] = kgzExtraID[1]
|
||||||
|
binary.LittleEndian.PutUint16(extra[2:], uint16(len(payload)&0xFFFF)) //#nosec G115 - masked
|
||||||
|
copy(extra[4:], payload)
|
||||||
|
return extra
|
||||||
|
}
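// Illustration (not part of the diff): for a DER payload of, say, 48 bytes,
// the Extra field built above begins with the four-byte subfield header
// 'K' 'G' 0x30 0x00 -- the two ID bytes followed by the payload length (48)
// encoded little-endian -- and the ASN.1 payload follows immediately after.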
|
||||||
|
|
||||||
|
// clampToInt32 clamps an int value into the int32 range using a switch to
|
||||||
|
// satisfy linters that prefer switch over if-else chains for ordered checks.
|
||||||
|
func clampToInt32(v int) int32 {
|
||||||
|
switch {
|
||||||
|
case v > int(math.MaxInt32):
|
||||||
|
return math.MaxInt32
|
||||||
|
case v < int(math.MinInt32):
|
||||||
|
return math.MinInt32
|
||||||
|
default:
|
||||||
|
return int32(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildExtraForPath prepares the gzip Extra field for kgz by collecting
|
||||||
|
// uid/gid/mode and ctime information, applying any overrides, and encoding it.
|
||||||
|
func buildExtraForPath(st unix.Stat_t, path string, setUID, setGID int) []byte {
|
||||||
|
uid := st.Uid
|
||||||
|
gid := st.Gid
|
||||||
|
if setUID >= 0 {
|
||||||
|
if uint64(setUID) <= math.MaxUint32 {
|
||||||
|
uid = uint32(setUID & 0xFFFFFFFF) //#nosec G115 - masked
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if setGID >= 0 {
|
||||||
|
if uint64(setGID) <= math.MaxUint32 {
|
||||||
|
gid = uint32(setGID & 0xFFFFFFFF) //#nosec G115 - masked
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mode := uint32(st.Mode & 0o7777)
|
||||||
|
|
||||||
|
// Use portable helper to gather ctime
|
||||||
|
var cts int64
|
||||||
|
var ctns int32
|
||||||
|
if ft, err := goutilslib.LoadFileTime(path); err == nil {
|
||||||
|
cts = ft.Changed.Unix()
|
||||||
|
ctns = clampToInt32(ft.Changed.Nanosecond())
|
||||||
|
}
|
||||||
|
|
||||||
|
return buildKGExtra(uid, gid, mode, cts, ctns)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseKGExtra scans a gzip Extra blob and returns kgz metadata if present.
|
||||||
|
func parseKGExtra(extra []byte) (uint32, uint32, uint32, int64, int32, bool) {
|
||||||
|
i := 0
|
||||||
|
for i+4 <= len(extra) {
|
||||||
|
id1 := extra[i]
|
||||||
|
id2 := extra[i+1]
|
||||||
|
l := int(binary.LittleEndian.Uint16(extra[i+2 : i+4]))
|
||||||
|
i += 4
|
||||||
|
if i+l > len(extra) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if id1 == kgzExtraID[0] && id2 == kgzExtraID[1] {
|
||||||
|
// ASN.1 decode payload
|
||||||
|
payload := extra[i : i+l]
|
||||||
|
var s struct {
|
||||||
|
Version int
|
||||||
|
UID int
|
||||||
|
GID int
|
||||||
|
Mode int
|
||||||
|
CTimeSec int64
|
||||||
|
CTimeNSec int32
|
||||||
|
}
|
||||||
|
if _, err := asn1.Unmarshal(payload, &s); err != nil {
|
||||||
|
return 0, 0, 0, 0, 0, false
|
||||||
|
}
|
||||||
|
if s.Version != 1 {
|
||||||
|
return 0, 0, 0, 0, 0, false
|
||||||
|
}
|
||||||
|
// Validate ranges before converting from int -> uint32 to avoid overflow.
|
||||||
|
if s.UID < 0 || s.GID < 0 || s.Mode < 0 {
|
||||||
|
return 0, 0, 0, 0, 0, false
|
||||||
|
}
|
||||||
|
if uint64(s.UID) > math.MaxUint32 || uint64(s.GID) > math.MaxUint32 || uint64(s.Mode) > math.MaxUint32 {
|
||||||
|
return 0, 0, 0, 0, 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return uint32(s.UID & 0xFFFFFFFF), uint32(s.GID & 0xFFFFFFFF),
|
||||||
|
uint32(s.Mode & 0xFFFFFFFF), s.CTimeSec, s.CTimeNSec, true //#nosec G115 - masked
|
||||||
|
}
|
||||||
|
i += l
|
||||||
|
}
|
||||||
|
return 0, 0, 0, 0, 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func compress(path, target string, level int, includeExtra bool, setUID, setGID int) error {
|
||||||
sourceFile, err := os.Open(path)
|
sourceFile, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "opening file for read")
|
return fmt.Errorf("opening file for read: %w", err)
|
||||||
}
|
}
|
||||||
defer sourceFile.Close()
|
defer sourceFile.Close()
|
||||||
|
|
||||||
|
// Gather file metadata
|
||||||
|
var st unix.Stat_t
|
||||||
|
if err = unix.Stat(path, &st); err != nil {
|
||||||
|
return fmt.Errorf("stat source: %w", err)
|
||||||
|
}
|
||||||
|
fi, err := sourceFile.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("stat source file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
destFile, err := os.Create(target)
|
destFile, err := os.Create(target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "opening file for write")
|
return fmt.Errorf("opening file for write: %w", err)
|
||||||
}
|
}
|
||||||
defer destFile.Close()
|
defer destFile.Close()
|
||||||
|
|
||||||
gzipCompressor, err := gzip.NewWriterLevel(destFile, level)
|
gzipCompressor, err := gzip.NewWriterLevel(destFile, level)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "invalid compression level")
|
return fmt.Errorf("invalid compression level: %w", err)
|
||||||
|
}
|
||||||
|
// Set header metadata
|
||||||
|
gzipCompressor.ModTime = fi.ModTime()
|
||||||
|
if includeExtra {
|
||||||
|
gzipCompressor.Extra = buildExtraForPath(st, path, setUID, setGID)
|
||||||
}
|
}
|
||||||
defer gzipCompressor.Close()
|
defer gzipCompressor.Close()
|
||||||
|
|
||||||
_, err = io.Copy(gzipCompressor, sourceFile)
|
_, err = io.Copy(gzipCompressor, sourceFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "compressing file")
|
return fmt.Errorf("compressing file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func uncompress(path, target string) error {
|
func uncompress(path, target string, unrestrict bool, preserveMtime bool) error {
|
||||||
sourceFile, err := os.Open(path)
|
sourceFile, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "opening file for read")
|
return fmt.Errorf("opening file for read: %w", err)
|
||||||
}
|
}
|
||||||
defer sourceFile.Close()
|
defer sourceFile.Close()
|
||||||
|
|
||||||
|
fi, err := sourceFile.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("reading file stats: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
maxDecompressionSize := fi.Size() * 32
|
||||||
|
|
||||||
gzipUncompressor, err := gzip.NewReader(sourceFile)
|
gzipUncompressor, err := gzip.NewReader(sourceFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "reading gzip headers")
|
return fmt.Errorf("reading gzip headers: %w", err)
|
||||||
}
|
}
|
||||||
defer gzipUncompressor.Close()
|
defer gzipUncompressor.Close()
|
||||||
|
|
||||||
|
var reader io.Reader = &io.LimitedReader{
|
||||||
|
R: gzipUncompressor,
|
||||||
|
N: maxDecompressionSize,
|
||||||
|
}
|
||||||
|
|
||||||
|
if unrestrict {
|
||||||
|
reader = gzipUncompressor
|
||||||
|
}
|
||||||
|
|
||||||
destFile, err := os.Create(target)
|
destFile, err := os.Create(target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "opening file for write")
|
return fmt.Errorf("opening file for write: %w", err)
|
||||||
}
|
}
|
||||||
defer destFile.Close()
|
defer destFile.Close()
|
||||||
|
|
||||||
_, err = io.Copy(destFile, gzipUncompressor)
|
_, err = io.Copy(destFile, reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "uncompressing file")
|
return fmt.Errorf("uncompressing file: %w", err)
|
||||||
|
}
|
||||||
|
// Apply metadata from Extra (uid/gid/mode) if present
|
||||||
|
if gzipUncompressor.Header.Extra != nil {
|
||||||
|
if uid, gid, mode, _, _, ok := parseKGExtra(gzipUncompressor.Header.Extra); ok {
|
||||||
|
// Chmod
|
||||||
|
_ = os.Chmod(target, os.FileMode(mode))
|
||||||
|
// Chown (may fail without privileges)
|
||||||
|
_ = os.Chown(target, int(uid), int(gid))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Preserve mtime if requested
|
||||||
|
if preserveMtime {
|
||||||
|
mt := gzipUncompressor.Header.ModTime
|
||||||
|
if !mt.IsZero() {
|
||||||
|
// Set both atime and mtime to mt for simplicity
|
||||||
|
_ = os.Chtimes(target, mt, mt)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
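// Note (a sketch, not part of the diff): io.Copy through an io.LimitedReader
// stops silently at the limit rather than returning an error. A caller that
// wants to detect a truncated (suspected gzip-bomb) decompression could keep a
// handle on the LimitedReader and inspect N afterwards:
//
//	lr := &io.LimitedReader{R: gzipUncompressor, N: maxDecompressionSize}
//	if _, err := io.Copy(destFile, lr); err == nil && lr.N == 0 {
//		// the limit was reached; the output may be truncated
//	}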
|
||||||
|
|
||||||
func usage(w io.Writer) {
|
func usage(w io.Writer) {
|
||||||
fmt.Fprintf(w, `Usage: %s [-l] source [target]
|
fmt.Fprintf(w, `Usage: %s [-l] [-k] [-m] [-x] [--uid N] [--gid N] source [target]
|
||||||
|
|
||||||
kgz is like gzip, but supports compressing and decompressing to a different
|
kgz is like gzip, but supports compressing and decompressing to a different
|
||||||
directory than the source file is in.
|
directory than the source file is in.
|
||||||
|
|
||||||
Flags:
|
Flags:
|
||||||
-l level Compression level (0-9). Only meaninful when
|
-l level Compression level (0-9). Only meaningful when compressing.
|
||||||
compressing a file.
|
-u Do not restrict the size during decompression (gzip bomb guard is 32x).
|
||||||
|
-k Keep the source file (do not remove it after successful (de)compression).
|
||||||
|
-m On decompression, set the file mtime from the gzip header.
|
||||||
|
-x On compression, include uid/gid/mode/ctime in the gzip Extra field.
|
||||||
|
--uid N When used with -x, set UID in Extra to N (overrides source owner).
|
||||||
|
--gid N When used with -x, set GID in Extra to N (overrides source group).
|
||||||
`, os.Args[0])
|
`, os.Args[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -89,8 +284,8 @@ func isDir(path string) bool {
|
|||||||
file, err := os.Open(path)
|
file, err := os.Open(path)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
stat, err := file.Stat()
|
stat, err2 := file.Stat()
|
||||||
if err != nil {
|
if err2 != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -109,7 +304,7 @@ func pathForUncompressing(source, dest string) (string, error) {
|
|||||||
|
|
||||||
source = filepath.Base(source)
|
source = filepath.Base(source)
|
||||||
if !strings.HasSuffix(source, gzipExt) {
|
if !strings.HasSuffix(source, gzipExt) {
|
||||||
return "", errors.Errorf("%s is a not gzip-compressed file", source)
|
return "", fmt.Errorf("%s is a not gzip-compressed file", source)
|
||||||
}
|
}
|
||||||
outFile := source[:len(source)-len(gzipExt)]
|
outFile := source[:len(source)-len(gzipExt)]
|
||||||
outFile = filepath.Join(dest, outFile)
|
outFile = filepath.Join(dest, outFile)
|
||||||
@@ -123,7 +318,7 @@ func pathForCompressing(source, dest string) (string, error) {
|
|||||||
|
|
||||||
source = filepath.Base(source)
|
source = filepath.Base(source)
|
||||||
if strings.HasSuffix(source, gzipExt) {
|
if strings.HasSuffix(source, gzipExt) {
|
||||||
return "", errors.Errorf("%s is a gzip-compressed file", source)
|
return "", fmt.Errorf("%s is a gzip-compressed file", source)
|
||||||
}
|
}
|
||||||
|
|
||||||
dest = filepath.Join(dest, source+gzipExt)
|
dest = filepath.Join(dest, source+gzipExt)
|
||||||
@@ -134,8 +329,21 @@ func main() {
|
|||||||
var level int
|
var level int
|
||||||
var path string
|
var path string
|
||||||
var target = "."
|
var target = "."
|
||||||
|
var err error
|
||||||
|
var unrestrict bool
|
||||||
|
var keep bool
|
||||||
|
var preserveMtime bool
|
||||||
|
var includeExtra bool
|
||||||
|
var setUID int
|
||||||
|
var setGID int
|
||||||
|
|
||||||
flag.IntVar(&level, "l", flate.DefaultCompression, "compression level")
|
flag.IntVar(&level, "l", flate.DefaultCompression, "compression level")
|
||||||
|
flag.BoolVar(&unrestrict, "u", false, "do not restrict decompression")
|
||||||
|
flag.BoolVar(&keep, "k", false, "keep the source file (do not remove it)")
|
||||||
|
flag.BoolVar(&preserveMtime, "m", false, "on decompression, set mtime from gzip header")
|
||||||
|
flag.BoolVar(&includeExtra, "x", false, "on compression, include uid/gid/mode/ctime in gzip Extra")
|
||||||
|
flag.IntVar(&setUID, "uid", -1, "when used with -x, set UID in Extra to this value")
|
||||||
|
flag.IntVar(&setGID, "gid", -1, "when used with -x, set GID in Extra to this value")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
if flag.NArg() < 1 || flag.NArg() > 2 {
|
if flag.NArg() < 1 || flag.NArg() > 2 {
|
||||||
@@ -149,30 +357,37 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if strings.HasSuffix(path, gzipExt) {
|
if strings.HasSuffix(path, gzipExt) {
|
||||||
target, err := pathForUncompressing(path, target)
|
target, err = pathForUncompressing(path, target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = uncompress(path, target)
|
err = uncompress(path, target, unrestrict, preserveMtime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
os.Remove(target)
|
os.Remove(target)
|
||||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
} else {
|
if !keep {
|
||||||
target, err := pathForCompressing(path, target)
|
_ = os.Remove(path)
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
err = compress(path, target, level)
|
target, err = pathForCompressing(path, target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
os.Remove(target)
|
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
os.Exit(1)
|
||||||
os.Exit(1)
|
}
|
||||||
}
|
|
||||||
|
err = compress(path, target, level, includeExtra, setUID, setGID)
|
||||||
|
if err != nil {
|
||||||
|
os.Remove(target)
|
||||||
|
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if !keep {
|
||||||
|
_ = os.Remove(path)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
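The kgz hunks above restore file metadata from the gzip header on decompression. A minimal, stdlib-only sketch of the mtime part (kgz's parseKGExtra helper and its uid/gid/mode Extra encoding are its own and are not reproduced here; the function name below is illustrative):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
)

// gunzipWithMtime decompresses src into dst and, if the gzip member header
// carries a modification time, applies it to the output file.
func gunzipWithMtime(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	zr, err := gzip.NewReader(in)
	if err != nil {
		return err
	}
	defer zr.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	// Note: no size limit here; kgz itself guards against gzip bombs.
	if _, err := io.Copy(out, zr); err != nil {
		return err
	}

	// The gzip header stores the source's mtime; restore it if it was set.
	if mt := zr.Header.ModTime; !mt.IsZero() {
		return os.Chtimes(dst, mt, mt)
	}
	return nil
}

func main() {
	if len(os.Args) != 3 {
		fmt.Fprintln(os.Stderr, "usage: gunzipWithMtime src.gz dst")
		os.Exit(1)
	}
	if err := gunzipWithMtime(os.Args[1], os.Args[2]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}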
@@ -40,14 +40,14 @@ func main() {
 		usage()
 	}
 
-	min, err := strconv.Atoi(flag.Arg(1))
+	minVal, err := strconv.Atoi(flag.Arg(1))
 	dieIf(err)
 
-	max, err := strconv.Atoi(flag.Arg(2))
+	maxVal, err := strconv.Atoi(flag.Arg(2))
 	dieIf(err)
 
 	code := kind << 6
-	code += (min << 3)
-	code += max
-	fmt.Printf("%0o\n", code)
+	code += (minVal << 3)
+	code += maxVal
+	fmt.Fprintf(os.Stdout, "%0o\n", code)
 }
@@ -5,7 +5,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
@@ -47,7 +46,7 @@ func help(w io.Writer) {
 }
 
 func loadDatabase() {
-	data, err := ioutil.ReadFile(dbFile)
+	data, err := os.ReadFile(dbFile)
 	if err != nil && os.IsNotExist(err) {
 		partsDB = &database{
 			Version: dbVersion,
@@ -74,7 +73,7 @@ func writeDB() {
 	data, err := json.Marshal(partsDB)
 	die.If(err)
 
-	err = ioutil.WriteFile(dbFile, data, 0644)
+	err = os.WriteFile(dbFile, data, 0644)
 	die.If(err)
 }
 
@@ -4,14 +4,13 @@ import (
 	"encoding/pem"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"os"
 )
 
 var ext = ".bin"
 
 func stripPEM(path string) error {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return err
 	}
@@ -22,7 +21,7 @@ func stripPEM(path string) error {
 		fmt.Fprintf(os.Stderr, " (only the first object will be decoded)\n")
 	}
 
-	return ioutil.WriteFile(path+ext, p.Bytes, 0644)
+	return os.WriteFile(path+ext, p.Bytes, 0644)
 }
 
 func main() {
@@ -3,8 +3,7 @@ package main
 import (
 	"encoding/pem"
 	"flag"
-	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 
 	"git.wntrmute.dev/kyle/goutils/lib"
@@ -21,9 +20,9 @@ func main() {
 
 	path := flag.Arg(0)
 	if path == "-" {
-		in, err = ioutil.ReadAll(os.Stdin)
+		in, err = io.ReadAll(os.Stdin)
 	} else {
-		in, err = ioutil.ReadFile(flag.Arg(0))
+		in, err = os.ReadFile(flag.Arg(0))
 	}
 	if err != nil {
 		lib.Err(lib.ExitFailure, err, "couldn't read file")
@@ -33,5 +32,7 @@ func main() {
 	if p == nil {
 		lib.Errx(lib.ExitFailure, "%s isn't a PEM-encoded file", flag.Arg(0))
 	}
-	fmt.Printf("%s", p.Bytes)
+	if _, err = os.Stdout.Write(p.Bytes); err != nil {
+		lib.Err(lib.ExitFailure, err, "writing body")
+	}
 }
@@ -70,7 +70,7 @@ func main() {
 		lib.Err(lib.ExitFailure, err, "failed to read input")
 	}
 	case argc > 1:
-		for i := 0; i < argc; i++ {
+		for i := range argc {
 			path := flag.Arg(i)
 			err = copyFile(path, buf)
 			if err != nil {
@@ -5,7 +5,6 @@ import (
 	"encoding/pem"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"os"
 )
 
@@ -13,14 +12,14 @@ func main() {
 	flag.Parse()
 
 	for _, fileName := range flag.Args() {
-		data, err := ioutil.ReadFile(fileName)
+		data, err := os.ReadFile(fileName)
 		if err != nil {
 			fmt.Fprintf(os.Stderr, "[!] %s: %v\n", fileName, err)
 			continue
 		}
 
-		fmt.Printf("[+] %s:\n", fileName)
-		rest := data[:]
+		fmt.Fprintf(os.Stdout, "[+] %s:\n", fileName)
+		rest := data
 		for {
 			var p *pem.Block
 			p, rest = pem.Decode(rest)
@@ -28,13 +27,14 @@ func main() {
 				break
 			}
 
-			cert, err := x509.ParseCertificate(p.Bytes)
+			var cert *x509.Certificate
+			cert, err = x509.ParseCertificate(p.Bytes)
 			if err != nil {
 				fmt.Fprintf(os.Stderr, "[!] %s: %v\n", fileName, err)
 				break
 			}
 
-			fmt.Printf("\t%+v\n", cert.Subject.CommonName)
+			fmt.Fprintf(os.Stdout, "\t%+v\n", cert.Subject.CommonName)
 		}
 	}
 }
@@ -43,7 +43,7 @@ func newName(path string) (string, error) {
 	return hashName(path, encodedHash), nil
 }
 
-func move(dst, src string, force bool) (err error) {
+func move(dst, src string, force bool) error {
 	if fileutil.FileDoesExist(dst) && !force {
 		return fmt.Errorf("%s exists (pass the -f flag to overwrite)", dst)
 	}
@@ -52,21 +52,23 @@ func move(dst, src string, force bool) (err error) {
 		return err
 	}
 
-	defer func(e error) {
+	var retErr error
+	defer func(e *error) {
 		dstFile.Close()
-		if e != nil {
+		if *e != nil {
 			os.Remove(dst)
 		}
-	}(err)
+	}(&retErr)
 
 	srcFile, err := os.Open(src)
 	if err != nil {
+		retErr = err
 		return err
 	}
 	defer srcFile.Close()
 
-	_, err = io.Copy(dstFile, srcFile)
-	if err != nil {
+	if _, err = io.Copy(dstFile, srcFile); err != nil {
+		retErr = err
 		return err
 	}
 
@@ -94,6 +96,44 @@ func init() {
 	flag.Usage = func() { usage(os.Stdout) }
 }
 
+type options struct {
+	dryRun, force, printChanged, verbose bool
+}
+
+func processOne(file string, opt options) error {
+	renamed, err := newName(file)
+	if err != nil {
+		_, _ = lib.Warn(err, "failed to get new file name")
+		return err
+	}
+	if opt.verbose && !opt.printChanged {
+		fmt.Fprintln(os.Stdout, file)
+	}
+	if renamed == file {
+		return nil
+	}
+	if !opt.dryRun {
+		if err = move(renamed, file, opt.force); err != nil {
+			_, _ = lib.Warn(err, "failed to rename file from %s to %s", file, renamed)
+			return err
+		}
+	}
+	if opt.printChanged && !opt.verbose {
+		fmt.Fprintln(os.Stdout, file, "->", renamed)
+	}
+	return nil
+}
+
+func run(dryRun, force, printChanged, verbose bool, files []string) {
+	if verbose && printChanged {
+		printChanged = false
+	}
+	opt := options{dryRun: dryRun, force: force, printChanged: printChanged, verbose: verbose}
+	for _, file := range files {
+		_ = processOne(file, opt)
+	}
+}
+
 func main() {
 	var dryRun, force, printChanged, verbose bool
 	flag.BoolVar(&force, "f", false, "force overwriting of files if there is a collision")
@@ -102,34 +142,5 @@ func main() {
 	flag.BoolVar(&verbose, "v", false, "list all processed files")
 
 	flag.Parse()
+	run(dryRun, force, printChanged, verbose, flag.Args())
-	if verbose && printChanged {
-		printChanged = false
-	}
-
-	for _, file := range flag.Args() {
-		renamed, err := newName(file)
-		if err != nil {
-			lib.Warn(err, "failed to get new file name")
-			continue
-		}
-
-		if verbose && !printChanged {
-			fmt.Println(file)
-		}
-
-		if renamed != file {
-			if !dryRun {
-				err = move(renamed, file, force)
-				if err != nil {
-					lib.Warn(err, "failed to rename file from %s to %s", file, renamed)
-					continue
-				}
-			}
-
-			if printChanged && !verbose {
-				fmt.Println(file, "->", renamed)
-			}
-		}
-	}
 }
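The move() change above swaps `defer func(e error)` for `defer func(e *error)` with a retErr variable. The reason is a common Go pitfall: arguments to a deferred call are evaluated when the defer statement executes, so passing the error by value freezes it (usually nil) before any failure happens. A small self-contained illustration of the difference (names here are illustrative only):

package main

import (
	"errors"
	"fmt"
)

func byValue() {
	var err error
	defer func(e error) {
		// Always prints <nil>: e was copied before err was assigned.
		fmt.Println("by value sees:", e)
	}(err)
	err = errors.New("boom")
}

func byPointer() {
	var err error
	defer func(e *error) {
		// Prints "boom": the pointer is dereferenced at return time.
		fmt.Println("by pointer sees:", *e)
	}(&err)
	err = errors.New("boom")
}

func main() {
	byValue()
	byPointer()
}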
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"io"
@@ -8,6 +9,7 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
+	"time"
 
 	"git.wntrmute.dev/kyle/goutils/ahash"
 	"git.wntrmute.dev/kyle/goutils/die"
@@ -66,24 +68,30 @@ func main() {
 	for _, remote := range flag.Args() {
 		u, err := url.Parse(remote)
 		if err != nil {
-			lib.Warn(err, "parsing %s", remote)
+			_, _ = lib.Warn(err, "parsing %s", remote)
 			continue
 		}
 
 		name := filepath.Base(u.Path)
 		if name == "" {
-			lib.Warnx("source URL doesn't appear to name a file")
+			_, _ = lib.Warnx("source URL doesn't appear to name a file")
 			continue
 		}
 
-		resp, err := http.Get(remote)
-		if err != nil {
-			lib.Warn(err, "fetching %s", remote)
+		req, reqErr := http.NewRequestWithContext(context.Background(), http.MethodGet, remote, nil)
+		if reqErr != nil {
+			_, _ = lib.Warn(reqErr, "building request for %s", remote)
 			continue
 		}
+		// Use proxy-aware HTTP client with a reasonable timeout for connects/handshakes
+		httpClient, err := lib.NewHTTPClient(lib.DialerOpts{Timeout: 30 * time.Second})
 		if err != nil {
-			lib.Warn(err, "fetching %s", remote)
+			_, _ = lib.Warn(err, "building HTTP client for %s", remote)
+			continue
+		}
+		resp, err := httpClient.Do(req)
+		if err != nil {
+			_, _ = lib.Warn(err, "fetching %s", remote)
 			continue
 		}
 
@@ -3,7 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
-	"math/rand"
+	"math/rand/v2"
 	"os"
 	"regexp"
 	"strconv"
@@ -17,8 +17,8 @@ func rollDie(count, sides int) []int {
 	sum := 0
 	var rolls []int
 
-	for i := 0; i < count; i++ {
-		roll := rand.Intn(sides) + 1
+	for range count {
+		roll := rand.IntN(sides) + 1 // #nosec G404
 		sum += roll
 		rolls = append(rolls, roll)
 	}
@@ -53,7 +53,7 @@ func init() {
 		project = wd[len(gopath):]
 	}
 
-func walkFile(path string, info os.FileInfo, err error) error {
+func walkFile(path string, _ os.FileInfo, err error) error {
 	if ignores[path] {
 		return filepath.SkipDir
 	}
@@ -62,22 +62,27 @@ func walkFile(path string, info os.FileInfo, err error) error {
 		return nil
 	}
 
-	debug.Println(path)
-
-	f, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
 	if err != nil {
 		return err
 	}
 
+	debug.Println(path)
+
+	f, err2 := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
+	if err2 != nil {
+		return err2
+	}
+
 	for _, importSpec := range f.Imports {
 		importPath := strings.Trim(importSpec.Path.Value, `"`)
-		if stdLibRegexp.MatchString(importPath) {
+		switch {
+		case stdLibRegexp.MatchString(importPath):
 			debug.Println("standard lib:", importPath)
 			continue
-		} else if strings.HasPrefix(importPath, project) {
+		case strings.HasPrefix(importPath, project):
 			debug.Println("internal import:", importPath)
 			continue
-		} else if strings.HasPrefix(importPath, "golang.org/") {
+		case strings.HasPrefix(importPath, "golang.org/"):
 			debug.Println("extended lib:", importPath)
 			continue
 		}
@@ -102,7 +107,7 @@ func main() {
 		ignores["vendor"] = true
 	}
 
-	for _, word := range strings.Split(ignoreLine, ",") {
+	for word := range strings.SplitSeq(ignoreLine, ",") {
 		ignores[strings.TrimSpace(word)] = true
 	}
 
165 cmd/ski/main.go
@@ -1,22 +1,15 @@
 package main
 
 import (
-	"bytes"
-	"crypto"
-	"crypto/ecdsa"
-	"crypto/rsa"
-	"crypto/sha1"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"encoding/asn1"
-	"encoding/pem"
+	// #nosec G505
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
-	"strings"
 
+	"git.wntrmute.dev/kyle/goutils/certlib/ski"
 	"git.wntrmute.dev/kyle/goutils/die"
 	"git.wntrmute.dev/kyle/goutils/lib"
 )
@@ -28,10 +21,10 @@ Usage:
 	ski [-hm] files...
 
 Flags:
+	-d	Hex encoding mode.
 	-h	Print this help message.
 	-m	All SKIs should match; as soon as an SKI mismatch is found,
 		it is reported.
 
 `)
 }
 
@@ -39,153 +32,37 @@ func init() {
 	flag.Usage = func() { usage(os.Stderr) }
 }
 
-func parse(path string) (public []byte, kt, ft string) {
-	data, err := ioutil.ReadFile(path)
-	die.If(err)
-
-	data = bytes.TrimSpace(data)
-	p, rest := pem.Decode(data)
-	if len(rest) > 0 {
-		lib.Warnx("trailing data in PEM file")
-	}
-
-	if p == nil {
-		die.With("no PEM data found")
-	}
-
-	data = p.Bytes
-
-	switch p.Type {
-	case "PRIVATE KEY", "RSA PRIVATE KEY", "EC PRIVATE KEY":
-		public, kt = parseKey(data)
-		ft = "private key"
-	case "CERTIFICATE":
-		public, kt = parseCertificate(data)
-		ft = "certificate"
-	case "CERTIFICATE REQUEST":
-		public, kt = parseCSR(data)
-		ft = "certificate request"
-	default:
-		die.With("unknown PEM type %s", p.Type)
-	}
-
-	return
-}
-
-func parseKey(data []byte) (public []byte, kt string) {
-	privInterface, err := x509.ParsePKCS8PrivateKey(data)
-	if err != nil {
-		privInterface, err = x509.ParsePKCS1PrivateKey(data)
-		if err != nil {
-			privInterface, err = x509.ParseECPrivateKey(data)
-			if err != nil {
-				die.With("couldn't parse private key.")
-			}
-		}
-	}
-
-	var priv crypto.Signer
-	switch privInterface.(type) {
-	case *rsa.PrivateKey:
-		priv = privInterface.(*rsa.PrivateKey)
-		kt = "RSA"
-	case *ecdsa.PrivateKey:
-		priv = privInterface.(*ecdsa.PrivateKey)
-		kt = "ECDSA"
-	default:
-		die.With("unknown private key type %T", privInterface)
-	}
-
-	public, err = x509.MarshalPKIXPublicKey(priv.Public())
-	die.If(err)
-
-	return
-}
-
-func parseCertificate(data []byte) (public []byte, kt string) {
-	cert, err := x509.ParseCertificate(data)
-	die.If(err)
-
-	pub := cert.PublicKey
-	switch pub.(type) {
-	case *rsa.PublicKey:
-		kt = "RSA"
-	case *ecdsa.PublicKey:
-		kt = "ECDSA"
-	default:
-		die.With("unknown public key type %T", pub)
-	}
-
-	public, err = x509.MarshalPKIXPublicKey(pub)
-	die.If(err)
-	return
-}
-
-func parseCSR(data []byte) (public []byte, kt string) {
-	csr, err := x509.ParseCertificateRequest(data)
-	die.If(err)
-
-	pub := csr.PublicKey
-	switch pub.(type) {
-	case *rsa.PublicKey:
-		kt = "RSA"
-	case *ecdsa.PublicKey:
-		kt = "ECDSA"
-	default:
-		die.With("unknown public key type %T", pub)
-	}
-
-	public, err = x509.MarshalPKIXPublicKey(pub)
-	die.If(err)
-	return
-}
-
-func dumpHex(in []byte) string {
-	var s string
-	for i := range in {
-		s += fmt.Sprintf("%02X:", in[i])
-	}
-
-	return strings.Trim(s, ":")
-}
-
-type subjectPublicKeyInfo struct {
-	Algorithm pkix.AlgorithmIdentifier
-	SubjectPublicKey asn1.BitString
-}
-
 func main() {
 	var help, shouldMatch bool
+	var displayModeString string
+	flag.StringVar(&displayModeString, "d", "lower", "hex encoding mode")
 	flag.BoolVar(&help, "h", false, "print a help message and exit")
 	flag.BoolVar(&shouldMatch, "m", false, "all SKIs should match")
 	flag.Parse()
 
+	displayMode := lib.ParseHexEncodeMode(displayModeString)
+
 	if help {
 		usage(os.Stdout)
 		os.Exit(0)
 	}
 
-	var ski string
+	var matchSKI string
 	for _, path := range flag.Args() {
-		public, kt, ft := parse(path)
-
-		var subPKI subjectPublicKeyInfo
-		_, err := asn1.Unmarshal(public, &subPKI)
-		if err != nil {
-			lib.Warn(err, "failed to get subject PKI")
-			continue
-		}
-
-		pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes)
-		pubHashString := dumpHex(pubHash[:])
-		if ski == "" {
-			ski = pubHashString
-		}
-
-		if shouldMatch && ski != pubHashString {
-			lib.Warnx("%s: SKI mismatch (%s != %s)",
-				path, ski, pubHashString)
-		}
-		fmt.Printf("%s %s (%s %s)\n", path, pubHashString, kt, ft)
+		keyInfo, err := ski.ParsePEM(path)
+		die.If(err)
+
+		keySKI, err := keyInfo.SKI(displayMode)
+		die.If(err)
+		if matchSKI == "" {
+			matchSKI = keySKI
+		}
+
+		if shouldMatch && matchSKI != keySKI {
+			_, _ = lib.Warnx("%s: SKI mismatch (%s != %s)",
+				path, matchSKI, keySKI)
+		}
+		fmt.Printf("%s %s (%s %s)\n", path, keySKI, keyInfo.KeyType, keyInfo.FileType)
 	}
 }
@@ -1,16 +1,17 @@
 package main
 
 import (
+	"context"
 	"flag"
 	"io"
-	"log"
 	"net"
 
 	"git.wntrmute.dev/kyle/goutils/die"
+	"git.wntrmute.dev/kyle/goutils/lib"
 )
 
 func proxy(conn net.Conn, inside string) error {
-	proxyConn, err := net.Dial("tcp", inside)
+	proxyConn, err := (&net.Dialer{}).DialContext(context.Background(), "tcp", inside)
 	if err != nil {
 		return err
 	}
@@ -19,7 +20,7 @@ func proxy(conn net.Conn, inside string) error {
 	defer conn.Close()
 
 	go func() {
-		io.Copy(conn, proxyConn)
+		_, _ = io.Copy(conn, proxyConn)
 	}()
 	_, err = io.Copy(proxyConn, conn)
 	return err
@@ -31,16 +32,22 @@ func main() {
 	flag.StringVar(&inside, "p", "4000", "inside port")
 	flag.Parse()
 
-	l, err := net.Listen("tcp", "0.0.0.0:"+outside)
+	lc := &net.ListenConfig{}
+	l, err := lc.Listen(context.Background(), "tcp", "0.0.0.0:"+outside)
 	die.If(err)
 
 	for {
-		conn, err := l.Accept()
+		var conn net.Conn
+		conn, err = l.Accept()
 		if err != nil {
-			log.Println(err)
+			_, _ = lib.Warn(err, "accept failed")
 			continue
 		}
 
-		go proxy(conn, "127.0.0.1:"+inside)
+		go func() {
+			if err = proxy(conn, "127.0.0.1:"+inside); err != nil {
+				_, _ = lib.Warn(err, "proxy error")
+			}
+		}()
 	}
 }
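The proxy above now listens and dials through context-aware APIs (net.ListenConfig and net.Dialer.DialContext) instead of net.Listen/net.Dial. A short stdlib-only sketch of that pattern, separate from the tool itself:

package main

import (
	"context"
	"fmt"
	"net"
	"os"
)

func main() {
	ctx := context.Background()

	// Inbound side: ListenConfig allows a context (and, if needed, socket options).
	lc := &net.ListenConfig{}
	l, err := lc.Listen(ctx, "tcp", "127.0.0.1:0")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer l.Close()
	fmt.Println("listening on", l.Addr())

	// Outbound side: DialContext lets callers cancel or time out the connect.
	conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", l.Addr().String())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	conn.Close()
}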
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"crypto/rand"
 	"crypto/tls"
 	"crypto/x509"
@@ -8,7 +9,6 @@ import (
 	"encoding/pem"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"os"
 
@@ -16,7 +16,7 @@ import (
 )
 
 func main() {
-	cfg := &tls.Config{}
+	cfg := &tls.Config{} // #nosec G402
 
 	var sysRoot, listenAddr, certFile, keyFile string
 	var verify bool
@@ -47,7 +47,8 @@ func main() {
 	}
 	cfg.Certificates = append(cfg.Certificates, cert)
 	if sysRoot != "" {
-		pemList, err := ioutil.ReadFile(sysRoot)
+		var pemList []byte
+		pemList, err = os.ReadFile(sysRoot)
 		die.If(err)
 
 		roots := x509.NewCertPool()
@@ -59,48 +60,54 @@ func main() {
 		cfg.RootCAs = roots
 	}
 
-	l, err := net.Listen("tcp", listenAddr)
+	lc := &net.ListenConfig{}
+	l, err := lc.Listen(context.Background(), "tcp", listenAddr)
 	if err != nil {
 		fmt.Println(err.Error())
 		os.Exit(1)
 	}
 
 	for {
-		conn, err := l.Accept()
+		var conn net.Conn
+		conn, err = l.Accept()
 		if err != nil {
 			fmt.Println(err.Error())
-		}
-
-		raddr := conn.RemoteAddr()
-		tconn := tls.Server(conn, cfg)
-		err = tconn.Handshake()
-		if err != nil {
-			fmt.Printf("[+] %v: failed to complete handshake: %v\n", raddr, err)
 			continue
 		}
-		cs := tconn.ConnectionState()
-		if len(cs.PeerCertificates) == 0 {
-			fmt.Printf("[+] %v: no chain presented\n", raddr)
-			continue
-		}
-
-		var chain []byte
-		for _, cert := range cs.PeerCertificates {
-			p := &pem.Block{
-				Type: "CERTIFICATE",
-				Bytes: cert.Raw,
-			}
-			chain = append(chain, pem.EncodeToMemory(p)...)
-		}
-
-		var nonce [16]byte
-		_, err = rand.Read(nonce[:])
-		if err != nil {
-			panic(err)
-		}
-		fname := fmt.Sprintf("%v-%v.pem", raddr, hex.EncodeToString(nonce[:]))
-		err = ioutil.WriteFile(fname, chain, 0644)
-		die.If(err)
-		fmt.Printf("%v: [+] wrote %v.\n", raddr, fname)
+		handleConn(conn, cfg)
 	}
 }
+
+// handleConn performs a TLS handshake, extracts the peer chain, and writes it to a file.
+func handleConn(conn net.Conn, cfg *tls.Config) {
+	defer conn.Close()
+	raddr := conn.RemoteAddr()
+	tconn := tls.Server(conn, cfg)
+	if err := tconn.HandshakeContext(context.Background()); err != nil {
+		fmt.Printf("[+] %v: failed to complete handshake: %v\n", raddr, err)
+		return
+	}
+	cs := tconn.ConnectionState()
+	if len(cs.PeerCertificates) == 0 {
+		fmt.Printf("[+] %v: no chain presented\n", raddr)
+		return
+	}
+
+	var chain []byte
+	for _, cert := range cs.PeerCertificates {
+		p := &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
+		chain = append(chain, pem.EncodeToMemory(p)...)
+	}
+
+	var nonce [16]byte
+	if _, err := rand.Read(nonce[:]); err != nil {
+		fmt.Printf("[+] %v: failed to generate filename nonce: %v\n", raddr, err)
+		return
+	}
+	fname := fmt.Sprintf("%v-%v.pem", raddr, hex.EncodeToString(nonce[:]))
+	if err := os.WriteFile(fname, chain, 0o644); err != nil {
+		fmt.Printf("[+] %v: failed to write %v: %v\n", raddr, fname, err)
+		return
+	}
+	fmt.Printf("%v: [+] wrote %v.\n", raddr, fname)
+}
@@ -1,54 +1,50 @@
 package main
 
 import (
+	"context"
 	"crypto/tls"
-	"crypto/x509"
 	"encoding/pem"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"os"
 
+	"git.wntrmute.dev/kyle/goutils/certlib"
 	"git.wntrmute.dev/kyle/goutils/die"
+	"git.wntrmute.dev/kyle/goutils/lib"
 )
 
 func main() {
-	var cfg = &tls.Config{}
-
 	var sysRoot, serverName string
+	var skipVerify bool
+	var strictTLS bool
+	lib.StrictTLSFlag(&strictTLS)
 	flag.StringVar(&sysRoot, "ca", "", "provide an alternate CA bundle")
-	flag.StringVar(&cfg.ServerName, "sni", cfg.ServerName, "provide an SNI name")
-	flag.BoolVar(&cfg.InsecureSkipVerify, "noverify", false, "don't verify certificates")
+	flag.StringVar(&serverName, "sni", "", "provide an SNI name")
+	flag.BoolVar(&skipVerify, "noverify", false, "don't verify certificates")
 	flag.Parse()
 
+	tlsCfg, err := lib.BaselineTLSConfig(skipVerify, strictTLS)
+	die.If(err)
+
 	if sysRoot != "" {
-		pemList, err := ioutil.ReadFile(sysRoot)
+		tlsCfg.RootCAs, err = certlib.LoadPEMCertPool(sysRoot)
 		die.If(err)
-
-		roots := x509.NewCertPool()
-		if !roots.AppendCertsFromPEM(pemList) {
-			fmt.Printf("[!] no valid roots found")
-			roots = nil
-		}
-
-		cfg.RootCAs = roots
 	}
 
 	if serverName != "" {
-		cfg.ServerName = serverName
+		tlsCfg.ServerName = serverName
 	}
 
 	for _, site := range flag.Args() {
-		_, _, err := net.SplitHostPort(site)
+		_, _, err = net.SplitHostPort(site)
 		if err != nil {
 			site += ":443"
 		}
-		conn, err := tls.Dial("tcp", site, cfg)
-		if err != nil {
-			fmt.Println(err.Error())
-			os.Exit(1)
-		}
+		var conn *tls.Conn
+		conn, err = lib.DialTLS(context.Background(), site, lib.DialerOpts{TLSConfig: tlsCfg})
+		die.If(err)
 
 		cs := conn.ConnectionState()
 		var chain []byte
@@ -61,8 +57,9 @@ func main() {
 			chain = append(chain, pem.EncodeToMemory(p)...)
 		}
 
-		err = ioutil.WriteFile(site+".pem", chain, 0644)
+		err = os.WriteFile(site+".pem", chain, 0644)
 		die.If(err)
 
 		fmt.Printf("[+] wrote %s.pem.\n", site)
 	}
 }
@@ -60,7 +60,7 @@ func printDigests(paths []string, issuer bool) {
 	for _, path := range paths {
 		cert, err := certlib.LoadCertificate(path)
 		if err != nil {
-			lib.Warn(err, "failed to load certificate from %s", path)
+			_, _ = lib.Warn(err, "failed to load certificate from %s", path)
 			continue
 		}
 
@@ -75,20 +75,19 @@ func matchDigests(paths []string, issuer bool) {
 	}
 
 	var invalid int
-	for {
-		if len(paths) == 0 {
-			break
-		}
+	for len(paths) > 0 {
 		fst := paths[0]
 		snd := paths[1]
 		paths = paths[2:]
 
 		fstCert, err := certlib.LoadCertificate(fst)
 		die.If(err)
 
 		sndCert, err := certlib.LoadCertificate(snd)
 		die.If(err)
 
 		if !bytes.Equal(getSubjectInfoHash(fstCert, issuer), getSubjectInfoHash(sndCert, issuer)) {
-			lib.Warnx("certificates don't match: %s and %s", fst, snd)
+			_, _ = lib.Warnx("certificates don't match: %s and %s", fst, snd)
 			invalid++
 		}
 	}
@@ -1,10 +1,15 @@
 package main
 
 import (
+	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
 	"os"
 
+	"git.wntrmute.dev/kyle/goutils/certlib/hosts"
+	"git.wntrmute.dev/kyle/goutils/die"
+	"git.wntrmute.dev/kyle/goutils/lib"
 )
 
 func main() {
@@ -13,16 +18,19 @@ func main() {
 		os.Exit(1)
 	}
 
-	hostPort := os.Args[1]
-	conn, err := tls.Dial("tcp", hostPort, &tls.Config{
-		InsecureSkipVerify: true,
-	})
-	if err != nil {
-		fmt.Printf("Failed to connect to the TLS server: %v\n", err)
-		os.Exit(1)
-	}
+	hostPort, err := hosts.ParseHost(os.Args[1])
+	die.If(err)
+	// Use proxy-aware TLS dialer; skip verification as before
+	conn, err := lib.DialTLS(
+		context.Background(),
+		hostPort.String(),
+		lib.DialerOpts{TLSConfig: &tls.Config{InsecureSkipVerify: true}},
+	) // #nosec G402
+	die.If(err)
+
 	defer conn.Close()
 
 	state := conn.ConnectionState()
 	printConnectionDetails(state)
 }
@@ -37,7 +45,6 @@ func printConnectionDetails(state tls.ConnectionState) {
 
 func tlsVersion(version uint16) string {
 	switch version {
-
 	case tls.VersionTLS13:
 		return "TLS 1.3"
 	case tls.VersionTLS12:
@@ -1,161 +1,33 @@
 package main
 
 import (
-	"bytes"
-	"crypto"
-	"crypto/ecdsa"
-	"crypto/elliptic"
-	"crypto/rsa"
-	"crypto/x509"
-	"encoding/pem"
-	"errors"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
+	"flag"
+	"fmt"
+	"os"
 
-	"git.wntrmute.dev/kyle/goutils/die"
+	"git.wntrmute.dev/kyle/goutils/certlib"
+	"git.wntrmute.dev/kyle/goutils/die"
 )
 
-var validPEMs = map[string]bool{
-	"PRIVATE KEY": true,
-	"RSA PRIVATE KEY": true,
-	"EC PRIVATE KEY": true,
-}
-
-const (
-	curveInvalid = iota // any invalid curve
-	curveRSA // indicates key is an RSA key, not an EC key
-	curveP256
-	curveP384
-	curveP521
-)
-
-func getECCurve(pub interface{}) int {
-	switch pub := pub.(type) {
-	case *ecdsa.PublicKey:
-		switch pub.Curve {
-		case elliptic.P256():
-			return curveP256
-		case elliptic.P384():
-			return curveP384
-		case elliptic.P521():
-			return curveP521
-		default:
-			return curveInvalid
-		}
-	case *rsa.PublicKey:
-		return curveRSA
-	default:
-		return curveInvalid
-	}
-}
-
-func loadKey(path string) (crypto.Signer, error) {
-	in, err := ioutil.ReadFile(path)
-	if err != nil {
-		return nil, err
-	}
-
-	in = bytes.TrimSpace(in)
-	p, _ := pem.Decode(in)
-	if p != nil {
-		if !validPEMs[p.Type] {
-			return nil, errors.New("invalid private key file type " + p.Type)
-		}
-		in = p.Bytes
-	}
-
-	priv, err := x509.ParsePKCS8PrivateKey(in)
-	if err != nil {
-		priv, err = x509.ParsePKCS1PrivateKey(in)
-		if err != nil {
-			priv, err = x509.ParseECPrivateKey(in)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	switch priv.(type) {
-	case *rsa.PrivateKey:
-		return priv.(*rsa.PrivateKey), nil
-	case *ecdsa.PrivateKey:
-		return priv.(*ecdsa.PrivateKey), nil
-	}
-
-	// should never reach here
-	return nil, errors.New("invalid private key")
-
-}
+// functionality refactored into certlib
 
 func main() {
 	var keyFile, certFile string
 	flag.StringVar(&keyFile, "k", "", "TLS private `key` file")
 	flag.StringVar(&certFile, "c", "", "TLS `certificate` file")
 	flag.Parse()
 
-	in, err := ioutil.ReadFile(certFile)
+	cert, err := certlib.LoadCertificate(certFile)
 	die.If(err)
 
-	p, _ := pem.Decode(in)
-	if p != nil {
-		if p.Type != "CERTIFICATE" {
-			die.With("invalid certificate (type is %s)", p.Type)
-		}
-		in = p.Bytes
-	}
-	cert, err := x509.ParseCertificate(in)
-	die.If(err)
-
-	priv, err := loadKey(keyFile)
-	die.If(err)
-
-	switch pub := priv.Public().(type) {
-	case *rsa.PublicKey:
-		switch certPub := cert.PublicKey.(type) {
-		case *rsa.PublicKey:
-			if pub.N.Cmp(certPub.N) != 0 || pub.E != certPub.E {
-				fmt.Println("No match (public keys don't match).")
-				os.Exit(1)
-			}
-			fmt.Println("Match.")
-			return
-		case *ecdsa.PublicKey:
-			fmt.Println("No match (RSA private key, EC public key).")
-			os.Exit(1)
-		}
-	case *ecdsa.PublicKey:
-		privCurve := getECCurve(pub)
-		certCurve := getECCurve(cert.PublicKey)
-		log.Printf("priv: %d\tcert: %d\n", privCurve, certCurve)
-
-		if certCurve == curveRSA {
-			fmt.Println("No match (private key is EC, certificate is RSA).")
-			os.Exit(1)
-		} else if privCurve == curveInvalid {
-			fmt.Println("No match (invalid private key curve).")
-			os.Exit(1)
-		} else if privCurve != certCurve {
-			fmt.Println("No match (EC curves don't match).")
-			os.Exit(1)
-		}
-
-		certPub := cert.PublicKey.(*ecdsa.PublicKey)
-		if pub.X.Cmp(certPub.X) != 0 {
-			fmt.Println("No match (public keys don't match).")
-			os.Exit(1)
-		}
-
-		if pub.Y.Cmp(certPub.Y) != 0 {
-			fmt.Println("No match (public keys don't match).")
-			os.Exit(1)
-		}
-
-		fmt.Println("Match.")
-	default:
-		fmt.Printf("Unrecognised private key type: %T\n", priv.Public())
-		os.Exit(1)
-	}
+	priv, err := certlib.LoadPrivateKey(keyFile)
+	die.If(err)
+
+	matched, reason := certlib.MatchKeys(cert, priv)
+	if matched {
+		fmt.Println("Match.")
+		return
+	}
+	fmt.Printf("No match (%s).\n", reason)
+	os.Exit(1)
 }
@@ -201,10 +201,6 @@ func init() {
 		os.Exit(1)
 	}
 
-	if fromLoc == time.UTC {
-
-	}
-
 		toLoc = time.UTC
 	}
 
@@ -257,15 +253,16 @@ func main() {
 		showTime(time.Now())
 		os.Exit(0)
 	case 1:
-		if flag.Arg(0) == "-" {
+		switch {
+		case flag.Arg(0) == "-":
 			s := bufio.NewScanner(os.Stdin)
 
 			for s.Scan() {
 				times = append(times, s.Text())
 			}
-		} else if flag.Arg(0) == "help" {
+		case flag.Arg(0) == "help":
 			usageExamples()
-		} else {
+		default:
 			times = flag.Args()
 		}
 	default:
@@ -4,7 +4,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 
 	"gopkg.in/yaml.v2"
@@ -12,9 +11,8 @@ import (
 
 type empty struct{}
 
-func errorf(format string, args ...interface{}) {
-	format += "\n"
-	fmt.Fprintf(os.Stderr, format, args...)
+func errorf(path string, err error) {
+	fmt.Fprintf(os.Stderr, "%s FAILED: %s\n", path, err)
 }
 
 func usage(w io.Writer) {
@@ -44,16 +42,16 @@ func main() {
 
 	if flag.NArg() == 1 && flag.Arg(0) == "-" {
 		path := "stdin"
-		in, err := ioutil.ReadAll(os.Stdin)
+		in, err := io.ReadAll(os.Stdin)
 		if err != nil {
-			errorf("%s FAILED: %s", path, err)
+			errorf(path, err)
 			os.Exit(1)
 		}
 
 		var e empty
 		err = yaml.Unmarshal(in, &e)
 		if err != nil {
-			errorf("%s FAILED: %s", path, err)
+			errorf(path, err)
 			os.Exit(1)
 		}
 
@@ -65,16 +63,16 @@ func main() {
 	}
 
 	for _, path := range flag.Args() {
-		in, err := ioutil.ReadFile(path)
+		in, err := os.ReadFile(path)
 		if err != nil {
-			errorf("%s FAILED: %s", path, err)
+			errorf(path, err)
 			continue
 		}
 
 		var e empty
 		err = yaml.Unmarshal(in, &e)
 		if err != nil {
-			errorf("%s FAILED: %s", path, err)
+			errorf(path, err)
 			continue
 		}
 
@@ -14,16 +14,16 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
+
+	"git.wntrmute.dev/kyle/goutils/lib"
 )
 
 const defaultDirectory = ".git/objects"
 
-func errorf(format string, a ...interface{}) {
-	fmt.Fprintf(os.Stderr, format, a...)
-	if format[len(format)-1] != '\n' {
-		fmt.Fprintf(os.Stderr, "\n")
-	}
-}
+// maxDecompressedSize limits how many bytes we will decompress from a zlib
+// stream to mitigate decompression bombs (gosec G110).
+// Increase this if you expect larger objects.
+const maxDecompressedSize int64 = 64 << 30 // 64 GiB
 
 func isDir(path string) bool {
 	fi, err := os.Stat(path)
@@ -48,17 +48,21 @@ func loadFile(path string) ([]byte, error) {
 	}
 	defer zread.Close()
 
-	_, err = io.Copy(buf, zread)
-	if err != nil {
+	// Protect against decompression bombs by limiting how much we read.
+	lr := io.LimitReader(zread, maxDecompressedSize+1)
+	if _, err = buf.ReadFrom(lr); err != nil {
 		return nil, err
 	}
+	if int64(buf.Len()) > maxDecompressedSize {
+		return nil, fmt.Errorf("decompressed size exceeds limit (%d bytes)", maxDecompressedSize)
+	}
 	return buf.Bytes(), nil
 }
 
 func showFile(path string) {
 	fileData, err := loadFile(path)
 	if err != nil {
-		errorf("%v", err)
+		lib.Warn(err, "failed to load %s", path)
 		return
 	}
 
@@ -68,39 +72,71 @@ func showFile(path string) {
 func searchFile(path string, search *regexp.Regexp) error {
 	file, err := os.Open(path)
 	if err != nil {
-		errorf("%v", err)
+		lib.Warn(err, "failed to open %s", path)
 		return err
 	}
 	defer file.Close()
 
 	zread, err := zlib.NewReader(file)
 	if err != nil {
-		errorf("%v", err)
+		lib.Warn(err, "failed to decompress %s", path)
 		return err
 	}
 	defer zread.Close()
 
-	zbuf := bufio.NewReader(zread)
-	if search.MatchReader(zbuf) {
-		fileData, err := loadFile(path)
-		if err != nil {
-			errorf("%v", err)
-			return err
-		}
-		fmt.Printf("%s:\n%s\n", path, fileData)
+	// Limit how much we scan to avoid DoS via huge decompression.
+	lr := io.LimitReader(zread, maxDecompressedSize+1)
+	zbuf := bufio.NewReader(lr)
+	if !search.MatchReader(zbuf) {
+		return nil
 	}
+
+	fileData, err := loadFile(path)
+	if err != nil {
+		lib.Warn(err, "failed to load %s", path)
+		return err
+	}
+	fmt.Printf("%s:\n%s\n", path, fileData)
 	return nil
 }
 
 func buildWalker(searchExpr *regexp.Regexp) filepath.WalkFunc {
-	return func(path string, info os.FileInfo, err error) error {
-		if info.Mode().IsRegular() {
-			return searchFile(path, searchExpr)
-		}
-		return nil
+	return func(path string, info os.FileInfo, _ error) error {
+		if !info.Mode().IsRegular() {
+			return nil
+		}
+		return searchFile(path, searchExpr)
 	}
 }
 
+// runSearch compiles the search expression and processes the provided paths.
+// It returns an error for fatal conditions; per-file errors are logged.
+func runSearch(expr string) error {
+	search, err := regexp.Compile(expr)
+	if err != nil {
+		return fmt.Errorf("invalid regexp: %w", err)
+	}
+
+	pathList := flag.Args()
+	if len(pathList) == 0 {
+		pathList = []string{defaultDirectory}
+	}
+
+	for _, path := range pathList {
+		if isDir(path) {
+			if err2 := filepath.Walk(path, buildWalker(search)); err2 != nil {
+				return err2
+			}
+			continue
+		}
+		if err2 := searchFile(path, search); err2 != nil {
+			// Non-fatal: keep going, but report it.
+			lib.Warn(err2, "non-fatal error while searching files")
+		}
+	}
+	return nil
+}
+
 func main() {
 	flSearch := flag.String("s", "", "search string (should be an RE2 regular expression)")
 	flag.Parse()
@@ -109,28 +145,10 @@ func main() {
 		for _, path := range flag.Args() {
 			showFile(path)
 		}
-	} else {
-		search, err := regexp.Compile(*flSearch)
-		if err != nil {
-			errorf("Bad regexp: %v", err)
-			return
-		}
-
-		pathList := flag.Args()
-		if len(pathList) == 0 {
-			pathList = []string{defaultDirectory}
-		}
-
-		for _, path := range pathList {
-			if isDir(path) {
-				err := filepath.Walk(path, buildWalker(search))
-				if err != nil {
-					errorf("%v", err)
-					return
-				}
-			} else {
-				searchFile(path, search)
-			}
-		}
+		return
 	}
+
+	if err := runSearch(*flSearch); err != nil {
+		lib.Err(lib.ExitFailure, err, "failed to run search")
+	}
 }
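The new maxDecompressedSize/io.LimitReader code above caps how much is read out of a zlib stream before scanning or printing it. A standalone sketch of the same guard; the limit value and function names here are illustrative, not the tool's own:

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
	"os"
)

const maxDecompressed int64 = 1 << 30 // illustrative 1 GiB cap

// readZlibCapped decompresses path but refuses to return more than
// maxDecompressed bytes, guarding against decompression bombs.
func readZlibCapped(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	zr, err := zlib.NewReader(f)
	if err != nil {
		return nil, err
	}
	defer zr.Close()

	var buf bytes.Buffer
	// Reading limit+1 bytes distinguishes "exactly at the limit" from "over it".
	if _, err := buf.ReadFrom(io.LimitReader(zr, maxDecompressed+1)); err != nil {
		return nil, err
	}
	if int64(buf.Len()) > maxDecompressed {
		return nil, fmt.Errorf("decompressed size exceeds %d bytes", maxDecompressed)
	}
	return buf.Bytes(), nil
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: readZlibCapped file")
		os.Exit(1)
	}
	data, err := readZlibCapped(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	os.Stdout.Write(data)
}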
52 dbg/dbg.go
@@ -1,12 +1,34 @@
-// Package dbg implements a debug printer.
+// Package dbg implements a simple debug printer.
+//
+// There are two main ways to use it:
+//   - By using one of the constructors and calling flag.BoolVar(&debug.Enabled...)
+//   - By setting the environment variable GOUTILS_ENABLE_DEBUG to true or false and
+//     calling NewFromEnv().
+//
+// If enabled, any of the print statements will be written to stdout. Otherwise,
+// nothing will be emitted.
 package dbg
 
 import (
 	"fmt"
 	"io"
 	"os"
+	"runtime/debug"
+	"strings"
 )
 
+const DebugEnvKey = "GOUTILS_ENABLE_DEBUG"
+
+var enabledValues = map[string]bool{
+	"1":       true,
+	"true":    true,
+	"yes":     true,
+	"on":      true,
+	"y":       true,
+	"enable":  true,
+	"enabled": true,
+}
+
 // A DebugPrinter is a drop-in replacement for fmt.Print*, and also acts as
 // an io.WriteCloser when enabled.
 type DebugPrinter struct {
@@ -15,6 +37,23 @@ type DebugPrinter struct {
 	out io.WriteCloser
 }
 
+// New returns a new DebugPrinter on os.Stdout.
+func New() *DebugPrinter {
+	return &DebugPrinter{
+		out: os.Stderr,
+	}
+}
+
+// NewFromEnv returns a new DebugPrinter based on the value of the environment
+// variable GOUTILS_ENABLE_DEBUG.
+func NewFromEnv() *DebugPrinter {
+	enabled := strings.ToLower(os.Getenv(DebugEnvKey))
+	return &DebugPrinter{
+		out:     os.Stderr,
+		Enabled: enabledValues[enabled],
+	}
+}
+
 // Close satisfies the Closer interface.
 func (dbg *DebugPrinter) Close() error {
 	return dbg.out.Close()
@@ -28,13 +67,6 @@ func (dbg *DebugPrinter) Write(p []byte) (int, error) {
 	return 0, nil
 }
 
-// New returns a new DebugPrinter on os.Stdout.
-func New() *DebugPrinter {
-	return &DebugPrinter{
-		out: os.Stdout,
-	}
-}
-
 // ToFile sets up a new DebugPrinter to a file, truncating it if it exists.
 func ToFile(path string) (*DebugPrinter, error) {
 	file, err := os.Create(path)
@@ -74,3 +106,7 @@ func (dbg *DebugPrinter) Printf(format string, v ...any) {
 		fmt.Fprintf(dbg.out, format, v...)
 	}
 }
+
+func (dbg *DebugPrinter) StackTrace() {
+	dbg.Write(debug.Stack())
+}
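The rewritten dbg package doc above describes enabling output through GOUTILS_ENABLE_DEBUG and NewFromEnv(). A hedged usage sketch; the import path is assumed from the repository layout (git.wntrmute.dev/kyle/goutils/dbg), and only functions shown in the diff are used:

package main

import (
	"git.wntrmute.dev/kyle/goutils/dbg" // assumed import path
)

func main() {
	// Honours GOUTILS_ENABLE_DEBUG=1/true/yes/on/...; prints nothing otherwise.
	debug := dbg.NewFromEnv()

	debug.Printf("connecting to %s\n", "localhost:4000")
	// Dump a stack trace through the same gate.
	debug.StackTrace()
}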
7 go.mod
@@ -5,20 +5,23 @@ go 1.24.0
 require (
 	github.com/hashicorp/go-syslog v1.0.0
 	github.com/kr/text v0.2.0
-	github.com/pkg/errors v0.9.1
 	github.com/pkg/sftp v1.12.0
-	golang.org/x/crypto v0.44.0
+	golang.org/x/crypto v0.39.0
+	golang.org/x/net v0.38.0
 	golang.org/x/sys v0.38.0
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
+	github.com/benbjohnson/clock v1.3.5
 	github.com/davecgh/go-spew v1.1.1
 	github.com/google/certificate-transparency-go v1.0.21
+	rsc.io/qr v0.2.0
 )
 
 require (
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/kr/pretty v0.1.0 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
 )
go.sum (16 changes)
@@ -1,3 +1,5 @@
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -25,19 +27,21 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU=
-golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
 golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
 golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
 golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
 golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
@@ -46,3 +50,5 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
+rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
@@ -11,3 +11,11 @@ const (
 	// ExitFailure is the failing exit status.
 	ExitFailure = 1
 )
+
+const (
+	OneTrueDateFormat    = "2006-01-02T15:04:05-0700"
+	DateShortFormat      = "2006-01-02"
+	TimeShortFormat      = "15:04:05"
+	TimeShorterFormat    = "15:04"
+	TimeStandardDateTime = "2006-01-02 15:04"
+)
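These are standard Go reference-time layouts. A short sketch of how they might be used, assuming (as the rest of this diff suggests, but the file header does not show) that the constants live in the lib package:

```go
package main

import (
	"fmt"
	"time"

	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	now := time.Now()
	fmt.Println(now.Format(lib.OneTrueDateFormat))    // e.g. 2025-11-02T15:04:05-0800
	fmt.Println(now.Format(lib.DateShortFormat))      // e.g. 2025-11-02
	fmt.Println(now.Format(lib.TimeStandardDateTime)) // e.g. 2025-11-02 15:04
}
```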
lib/dialer.go (new file, 525 lines)
// Package lib contains reusable helpers. This file provides proxy-aware
// dialers for plain TCP and TLS connections using environment variables.
//
// Supported proxy environment variables (checked case-insensitively):
//   - SOCKS5_PROXY (e.g., socks5://user:pass@host:1080)
//   - HTTPS_PROXY (e.g., https://user:pass@host:443)
//   - HTTP_PROXY (e.g., http://user:pass@host:3128)
//
// Precedence when multiple proxies are set (both for net and TLS dialers):
//  1. SOCKS5_PROXY
//  2. HTTPS_PROXY
//  3. HTTP_PROXY
//
// Both uppercase and lowercase variable names are honored.
package lib

import (
    "bufio"
    "context"
    "crypto/tls"
    "encoding/base64"
    "errors"
    "flag"
    "fmt"
    "net"
    "net/http"
    "net/url"
    "os"
    "strings"
    "time"

    xproxy "golang.org/x/net/proxy"

    "git.wntrmute.dev/kyle/goutils/dbg"
)

// StrictBaselineTLSConfig returns a secure TLS config.
// Many of the tools in this repo are designed to debug broken TLS systems
// and therefore explicitly support old or insecure TLS setups.
func StrictBaselineTLSConfig() *tls.Config {
    return &tls.Config{
        MinVersion:         tls.VersionTLS12,
        InsecureSkipVerify: false, // explicitly set
    }
}

func StrictTLSFlag(useStrict *bool) {
    flag.BoolVar(useStrict, "strict-tls", false, "Use strict TLS configuration (disables certificate verification)")
}

func BaselineTLSConfig(skipVerify bool, secure bool) (*tls.Config, error) {
    if secure && skipVerify {
        return nil, errors.New("cannot skip verification and use secure TLS")
    }

    if skipVerify {
        return &tls.Config{InsecureSkipVerify: true}, nil // #nosec G402 - intentional
    }

    if secure {
        return StrictBaselineTLSConfig(), nil
    }

    return &tls.Config{}, nil // #nosec G402 - intentional
}

var debug = dbg.NewFromEnv()

// DialerOpts controls creation of proxy-aware dialers.
//
// Timeout controls the maximum amount of time spent establishing the
// underlying TCP connection and any proxy handshake. If zero, a
// reasonable default (30s) is used.
//
// TLSConfig is used by the TLS dialer to configure the TLS handshake to
// the target endpoint. If TLSConfig.ServerName is empty, it will be set
// from the host portion of the address passed to DialContext.
type DialerOpts struct {
    Timeout   time.Duration
    TLSConfig *tls.Config
}

// ContextDialer matches the common DialContext signature used by net and tls dialers.
type ContextDialer interface {
    DialContext(ctx context.Context, network, address string) (net.Conn, error)
}

// DialTCP is a convenience helper that dials a TCP connection to address
// using a proxy-aware dialer derived from opts. It honors SOCKS5_PROXY,
// HTTPS_PROXY, and HTTP_PROXY environment variables.
func DialTCP(ctx context.Context, address string, opts DialerOpts) (net.Conn, error) {
    d, err := NewNetDialer(opts)
    if err != nil {
        return nil, err
    }
    return d.DialContext(ctx, "tcp", address)
}

// DialTLS is a convenience helper that dials a TLS-wrapped TCP connection to
// address using a proxy-aware dialer derived from opts. It returns a *tls.Conn.
// It honors SOCKS5_PROXY, HTTPS_PROXY, and HTTP_PROXY environment variables and
// uses opts.TLSConfig for the handshake (filling ServerName from address if empty).
func DialTLS(ctx context.Context, address string, opts DialerOpts) (*tls.Conn, error) {
    d, err := NewTLSDialer(opts)
    if err != nil {
        return nil, err
    }

    c, err := d.DialContext(ctx, "tcp", address)
    if err != nil {
        return nil, err
    }

    tlsConn, ok := c.(*tls.Conn)
    if !ok {
        _ = c.Close()
        return nil, fmt.Errorf("DialTLS: expected *tls.Conn, got %T", c)
    }
    return tlsConn, nil
}

// NewNetDialer returns a ContextDialer that dials TCP connections using
// proxies discovered from the environment (SOCKS5_PROXY, HTTPS_PROXY, HTTP_PROXY).
// The returned dialer supports context cancellation for direct and HTTP(S)
// proxies and applies the configured timeout to connection/proxy handshake.
func NewNetDialer(opts DialerOpts) (ContextDialer, error) {
    if opts.Timeout <= 0 {
        opts.Timeout = 30 * time.Second
    }

    if u := getProxyURLFromEnv("SOCKS5_PROXY"); u != nil {
        debug.Printf("using SOCKS5 proxy %q\n", u)
        return newSOCKS5Dialer(u, opts)
    }

    if u := getProxyURLFromEnv("HTTPS_PROXY"); u != nil {
        // Respect the proxy URL scheme. Zscaler may set HTTPS_PROXY to an HTTP proxy
        // running locally; in that case we must NOT TLS-wrap the proxy connection.
        debug.Printf("using HTTPS proxy %q\n", u)
        return &httpProxyDialer{
            proxyURL: u,
            timeout:  opts.Timeout,
            secure:   strings.EqualFold(u.Scheme, "https"),
            config:   opts.TLSConfig,
        }, nil
    }

    if u := getProxyURLFromEnv("HTTP_PROXY"); u != nil {
        debug.Printf("using HTTP proxy %q\n", u)
        return &httpProxyDialer{
            proxyURL: u,
            timeout:  opts.Timeout,
            // Only TLS-wrap the proxy connection if the URL scheme is https.
            secure: strings.EqualFold(u.Scheme, "https"),
            config: opts.TLSConfig,
        }, nil
    }

    // Direct dialer
    return &net.Dialer{Timeout: opts.Timeout}, nil
}

// NewTLSDialer returns a ContextDialer that establishes a TLS connection to
// the destination, while honoring SOCKS5_PROXY/HTTPS_PROXY/HTTP_PROXY.
//
// The returned dialer performs proxy negotiation (if any), then completes a
// TLS handshake to the target using opts.TLSConfig.
func NewTLSDialer(opts DialerOpts) (ContextDialer, error) {
    if opts.Timeout <= 0 {
        opts.Timeout = 30 * time.Second
    }

    // Prefer SOCKS5 if present.
    if u := getProxyURLFromEnv("SOCKS5_PROXY"); u != nil {
        debug.Printf("using SOCKS5 proxy %q\n", u)
        base, err := newSOCKS5Dialer(u, opts)
        if err != nil {
            return nil, err
        }
        return &tlsWrappingDialer{base: base, tcfg: opts.TLSConfig, timeout: opts.Timeout}, nil
    }

    // For TLS, prefer HTTPS proxy over HTTP if both set.
    if u := getProxyURLFromEnv("HTTPS_PROXY"); u != nil {
        debug.Printf("using HTTPS proxy %q\n", u)
        base := &httpProxyDialer{
            proxyURL: u,
            timeout:  opts.Timeout,
            secure:   strings.EqualFold(u.Scheme, "https"),
            config:   opts.TLSConfig,
        }
        return &tlsWrappingDialer{base: base, tcfg: opts.TLSConfig, timeout: opts.Timeout}, nil
    }

    if u := getProxyURLFromEnv("HTTP_PROXY"); u != nil {
        debug.Printf("using HTTP proxy %q\n", u)
        base := &httpProxyDialer{
            proxyURL: u,
            timeout:  opts.Timeout,
            secure:   strings.EqualFold(u.Scheme, "https"),
            config:   opts.TLSConfig,
        }
        return &tlsWrappingDialer{base: base, tcfg: opts.TLSConfig, timeout: opts.Timeout}, nil
    }

    // Direct TLS
    base := &net.Dialer{Timeout: opts.Timeout}
    return &tlsWrappingDialer{base: base, tcfg: opts.TLSConfig, timeout: opts.Timeout}, nil
}

// ---- Implementation helpers ----

func getProxyURLFromEnv(name string) *url.URL {
    // check both upper/lowercase
    v := os.Getenv(name)
    if v == "" {
        v = os.Getenv(strings.ToLower(name))
    }
    if v == "" {
        return nil
    }
    // If scheme omitted, infer from env var name.
    if !strings.Contains(v, "://") {
        switch strings.ToUpper(name) {
        case "SOCKS5_PROXY":
            v = "socks5://" + v
        case "HTTPS_PROXY":
            v = "https://" + v
        default:
            v = "http://" + v
        }
    }
    u, err := url.Parse(v)
    if err != nil {
        return nil
    }
    return u
}

// NewHTTPClient returns an *http.Client that is proxy-aware.
//
// Behavior:
//   - If SOCKS5_PROXY is set, the client routes all TCP connections through the
//     SOCKS5 proxy using a custom DialContext, and disables HTTP(S) proxying in
//     the transport (per our precedence SOCKS5 > HTTPS > HTTP).
//   - Otherwise, it uses http.ProxyFromEnvironment which supports HTTP_PROXY,
//     HTTPS_PROXY, and NO_PROXY/no_proxy.
//   - Connection and TLS handshake timeouts are derived from opts.Timeout.
//   - For HTTPS targets, opts.TLSConfig is applied to the transport.
func NewHTTPClient(opts DialerOpts) (*http.Client, error) {
    if opts.Timeout <= 0 {
        opts.Timeout = 30 * time.Second
    }

    // Base transport configuration
    tr := &http.Transport{
        TLSClientConfig:     opts.TLSConfig,
        TLSHandshakeTimeout: opts.Timeout,
        // Leave other fields as Go defaults for compatibility.
    }

    // If SOCKS5 is configured, use our dialer and disable HTTP proxying to
    // avoid double-proxying. Otherwise, rely on ProxyFromEnvironment for
    // HTTP(S) proxies and still set a connect timeout via net.Dialer.
    if u := getProxyURLFromEnv("SOCKS5_PROXY"); u != nil {
        d, err := newSOCKS5Dialer(u, opts)
        if err != nil {
            return nil, err
        }
        tr.Proxy = nil
        tr.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) {
            return d.DialContext(ctx, network, address)
        }
    } else {
        tr.Proxy = http.ProxyFromEnvironment
        // Use a standard net.Dialer to ensure we apply a connect timeout.
        nd := &net.Dialer{Timeout: opts.Timeout}
        tr.DialContext = nd.DialContext
    }

    // Construct client; we don't set Client.Timeout here to avoid affecting
    // streaming responses. Callers can set it if they want an overall deadline.
    return &http.Client{Transport: tr}, nil
}

// httpProxyDialer implements CONNECT tunneling over HTTP or HTTPS proxy.
type httpProxyDialer struct {
    proxyURL *url.URL
    timeout  time.Duration
    secure   bool // true for HTTPS proxy
    config   *tls.Config
}

// proxyAddress returns host:port for the proxy, applying defaults by scheme when missing.
func (d *httpProxyDialer) proxyAddress() string {
    proxyAddr := d.proxyURL.Host
    if !strings.Contains(proxyAddr, ":") {
        if d.secure {
            proxyAddr += ":443"
        } else {
            proxyAddr += ":80"
        }
    }
    return proxyAddr
}

// tlsWrapProxyConn performs a TLS handshake to the proxy when d.secure is true.
// It clones the provided tls.Config (if any), ensures ServerName and a safe
// minimum TLS version.
func (d *httpProxyDialer) tlsWrapProxyConn(ctx context.Context, conn net.Conn) (net.Conn, error) {
    host := d.proxyURL.Hostname()
    // Clone provided config (if any) to avoid mutating caller's config.
    cfg := &tls.Config{} // #nosec G402 - intentional
    if d.config != nil {
        cfg = d.config.Clone()
    }

    if cfg.ServerName == "" {
        cfg.ServerName = host
    }

    tlsConn := tls.Client(conn, cfg)
    if err := tlsConn.HandshakeContext(ctx); err != nil {
        _ = conn.Close()
        return nil, fmt.Errorf("tls handshake with https proxy failed: %w", err)
    }
    return tlsConn, nil
}

// readConnectResponse reads and validates the proxy's response to a CONNECT
// request. It returns nil on a 200 status and an error otherwise.
func readConnectResponse(br *bufio.Reader) error {
    statusLine, err := br.ReadString('\n')
    if err != nil {
        return fmt.Errorf("failed to read CONNECT response: %w", err)
    }

    if !strings.HasPrefix(statusLine, "HTTP/") {
        return fmt.Errorf("invalid proxy response: %q", strings.TrimSpace(statusLine))
    }

    if !strings.Contains(statusLine, " 200 ") && !strings.HasSuffix(strings.TrimSpace(statusLine), " 200") {
        // Drain headers for context
        _ = drainHeaders(br)
        return fmt.Errorf("proxy CONNECT failed: %s", strings.TrimSpace(statusLine))
    }

    return drainHeaders(br)
}

func (d *httpProxyDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
    if !strings.HasPrefix(network, "tcp") {
        return nil, fmt.Errorf("http proxy dialer only supports TCP, got %q", network)
    }

    // Dial to proxy
    var nd = &net.Dialer{Timeout: d.timeout}
    conn, err := nd.DialContext(ctx, "tcp", d.proxyAddress())
    if err != nil {
        return nil, err
    }

    // Deadline covering CONNECT and (for TLS wrapper) will be handled by caller too.
    if d.timeout > 0 {
        _ = conn.SetDeadline(time.Now().Add(d.timeout))
    }

    // If HTTPS proxy, wrap with TLS to the proxy itself.
    if d.secure {
        c, werr := d.tlsWrapProxyConn(ctx, conn)
        if werr != nil {
            return nil, werr
        }
        conn = c
    }

    req := buildConnectRequest(d.proxyURL, address)
    if _, err = conn.Write([]byte(req)); err != nil {
        _ = conn.Close()
        return nil, fmt.Errorf("failed to write CONNECT request: %w", err)
    }

    // Read proxy response until end of headers
    br := bufio.NewReader(conn)
    if err = readConnectResponse(br); err != nil {
        _ = conn.Close()
        return nil, err
    }

    // Clear deadline for caller to manage further I/O.
    _ = conn.SetDeadline(time.Time{})
    return conn, nil
}

func buildConnectRequest(proxyURL *url.URL, target string) string {
    var b strings.Builder
    fmt.Fprintf(&b, "CONNECT %s HTTP/1.1\r\n", target)
    fmt.Fprintf(&b, "Host: %s\r\n", target)
    b.WriteString("Proxy-Connection: Keep-Alive\r\n")
    b.WriteString("User-Agent: goutils-dialer/1\r\n")

    if proxyURL.User != nil {
        user := proxyURL.User.Username()
        pass, _ := proxyURL.User.Password()
        auth := base64.StdEncoding.EncodeToString([]byte(user + ":" + pass))
        fmt.Fprintf(&b, "Proxy-Authorization: Basic %s\r\n", auth)
    }
    b.WriteString("\r\n")
    return b.String()
}

func drainHeaders(br *bufio.Reader) error {
    for {
        line, err := br.ReadString('\n')
        if err != nil {
            return fmt.Errorf("reading proxy headers: %w", err)
        }
        if line == "\r\n" || line == "\n" {
            return nil
        }
    }
}

// newSOCKS5Dialer builds a context-aware wrapper over the x/net/proxy dialer.
func newSOCKS5Dialer(u *url.URL, opts DialerOpts) (ContextDialer, error) {
    var auth *xproxy.Auth
    if u.User != nil {
        user := u.User.Username()
        pass, _ := u.User.Password()
        auth = &xproxy.Auth{User: user, Password: pass}
    }
    forward := &net.Dialer{Timeout: opts.Timeout}
    d, err := xproxy.SOCKS5("tcp", hostPortWithDefault(u, "1080"), auth, forward)
    if err != nil {
        return nil, err
    }
    return &socks5ContextDialer{d: d, timeout: opts.Timeout}, nil
}

type socks5ContextDialer struct {
    d       xproxy.Dialer // lacks context; we wrap it
    timeout time.Duration
}

func (s *socks5ContextDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
    if !strings.HasPrefix(network, "tcp") {
        return nil, errors.New("socks5 dialer only supports TCP")
    }
    // Best-effort context support: run the non-context dial in a goroutine
    // and respect ctx cancellation/timeout.
    type result struct {
        c   net.Conn
        err error
    }
    ch := make(chan result, 1)
    go func() {
        c, err := s.d.Dial("tcp", address)
        ch <- result{c: c, err: err}
    }()

    select {
    case <-ctx.Done():
        return nil, ctx.Err()
    case r := <-ch:
        return r.c, r.err
    }
}

// tlsWrappingDialer performs a TLS handshake over an existing base dialer.
type tlsWrappingDialer struct {
    base    ContextDialer
    tcfg    *tls.Config
    timeout time.Duration
}

func (t *tlsWrappingDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
    if !strings.HasPrefix(network, "tcp") {
        return nil, fmt.Errorf("tls dialer only supports TCP, got %q", network)
    }
    raw, err := t.base.DialContext(ctx, network, address)
    if err != nil {
        return nil, err
    }

    // Apply deadline for handshake.
    if t.timeout > 0 {
        _ = raw.SetDeadline(time.Now().Add(t.timeout))
    }

    var h string
    host := address

    if h, _, err = net.SplitHostPort(address); err == nil {
        host = h
    }
    var cfg *tls.Config
    if t.tcfg != nil {
        // Clone to avoid copying internal locks and to prevent mutating caller's config.
        c := t.tcfg.Clone()
        if c.ServerName == "" {
            c.ServerName = host
        }
        cfg = c
    } else {
        cfg = &tls.Config{ServerName: host, MinVersion: tls.VersionTLS12}
    }

    tlsConn := tls.Client(raw, cfg)
    if err = tlsConn.HandshakeContext(ctx); err != nil {
        _ = raw.Close()
        return nil, err
    }

    // Clear deadline after successful handshake
    _ = tlsConn.SetDeadline(time.Time{})
    return tlsConn, nil
}

func hostPortWithDefault(u *url.URL, defPort string) string {
    host := u.Host
    if !strings.Contains(host, ":") {
        host += ":" + defPort
    }
    return host
}
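A hedged sketch of driving the new dialer API; the target host is a placeholder, and proxy selection happens implicitly from SOCKS5_PROXY/HTTPS_PROXY/HTTP_PROXY if they are set:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	opts := lib.DialerOpts{Timeout: 10 * time.Second}

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// Dials through whichever proxy the environment configures, or directly.
	conn, err := lib.DialTLS(ctx, "example.com:443", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	state := conn.ConnectionState()
	fmt.Printf("TLS %#x established; %d peer certificates\n", state.Version, len(state.PeerCertificates))
}
```

NewHTTPClient follows the same precedence for HTTP work, so callers that only need an *http.Client can use it instead of wiring a transport by hand.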
lib/fetch.go (new file, 165 lines)
package lib

import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io"
    "net"
    "os"

    "git.wntrmute.dev/kyle/goutils/certlib"
    "git.wntrmute.dev/kyle/goutils/certlib/hosts"
    "git.wntrmute.dev/kyle/goutils/fileutil"
)

// Note: Previously this package exposed a FetcherOpts type. It has been
// refactored to use *tls.Config directly for configuring TLS behavior.

// Fetcher is an interface for fetching certificates from a remote source. It
// currently supports fetching from a server or a file.
type Fetcher interface {
    Get() (*x509.Certificate, error)
    GetChain() ([]*x509.Certificate, error)
    String() string
}

type ServerFetcher struct {
    host     string
    port     int
    insecure bool
    roots    *x509.CertPool
}

// WithRoots sets the roots for the ServerFetcher.
func WithRoots(roots *x509.CertPool) func(*ServerFetcher) {
    return func(sf *ServerFetcher) {
        sf.roots = roots
    }
}

// WithSkipVerify sets the insecure flag for the ServerFetcher.
func WithSkipVerify() func(*ServerFetcher) {
    return func(sf *ServerFetcher) {
        sf.insecure = true
    }
}

// ParseServer parses a server string into a ServerFetcher. It can be a URL or
// a host:port pair.
func ParseServer(host string) (*ServerFetcher, error) {
    target, err := hosts.ParseHost(host)
    if err != nil {
        return nil, fmt.Errorf("failed to parse server: %w", err)
    }

    return &ServerFetcher{
        host: target.Host,
        port: target.Port,
    }, nil
}

func (sf *ServerFetcher) String() string {
    return fmt.Sprintf("tls://%s", net.JoinHostPort(sf.host, Itoa(sf.port, -1)))
}

func (sf *ServerFetcher) GetChain() ([]*x509.Certificate, error) {
    opts := DialerOpts{
        TLSConfig: &tls.Config{
            InsecureSkipVerify: sf.insecure, // #nosec G402 - intentional
            RootCAs:            sf.roots,
        },
    }

    conn, err := DialTLS(context.Background(), net.JoinHostPort(sf.host, Itoa(sf.port, -1)), opts)
    if err != nil {
        return nil, fmt.Errorf("failed to dial server: %w", err)
    }
    defer conn.Close()

    state := conn.ConnectionState()
    return state.PeerCertificates, nil
}

func (sf *ServerFetcher) Get() (*x509.Certificate, error) {
    certs, err := sf.GetChain()
    if err != nil {
        return nil, err
    }

    return certs[0], nil
}

type FileFetcher struct {
    path string
}

func NewFileFetcher(path string) *FileFetcher {
    return &FileFetcher{
        path: path,
    }
}

func (ff *FileFetcher) String() string {
    return ff.path
}

func (ff *FileFetcher) GetChain() ([]*x509.Certificate, error) {
    if ff.path == "-" {
        certData, err := io.ReadAll(os.Stdin)
        if err != nil {
            return nil, fmt.Errorf("failed to read from stdin: %w", err)
        }

        return certlib.ParseCertificatesPEM(certData)
    }

    certs, err := certlib.LoadCertificates(ff.path)
    if err != nil {
        return nil, fmt.Errorf("failed to load chain: %w", err)
    }

    return certs, nil
}

func (ff *FileFetcher) Get() (*x509.Certificate, error) {
    certs, err := ff.GetChain()
    if err != nil {
        return nil, err
    }

    return certs[0], nil
}

// GetCertificateChain fetches a certificate chain from a remote source.
// If cfg is non-nil and spec refers to a TLS server, the provided TLS
// configuration will be used to control verification behavior (e.g.,
// InsecureSkipVerify, RootCAs).
func GetCertificateChain(spec string, cfg *tls.Config) ([]*x509.Certificate, error) {
    if fileutil.FileDoesExist(spec) {
        return NewFileFetcher(spec).GetChain()
    }

    fetcher, err := ParseServer(spec)
    if err != nil {
        return nil, err
    }

    if cfg != nil {
        fetcher.insecure = cfg.InsecureSkipVerify
        fetcher.roots = cfg.RootCAs
    }

    return fetcher.GetChain()
}

// GetCertificate fetches the first certificate from a certificate chain.
func GetCertificate(spec string, cfg *tls.Config) (*x509.Certificate, error) {
    certs, err := GetCertificateChain(spec, cfg)
    if err != nil {
        return nil, err
    }

    return certs[0], nil
}
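A short sketch of the two fetcher entry points; the file path and server address below are placeholders:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	// An existing file path is read from disk; anything else is treated as a server.
	for _, spec := range []string{"testdata/chain.pem", "example.com:443"} {
		chain, err := lib.GetCertificateChain(spec, &tls.Config{MinVersion: tls.VersionTLS12})
		if err != nil {
			log.Fatal(err)
		}
		for _, cert := range chain {
			fmt.Printf("%s: %s expires %s\n", spec, cert.Subject.CommonName, cert.NotAfter)
		}
	}
}
```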
@@ -1,4 +1,4 @@
-// +build freebsd darwin,386 netbsd
+//go:build bsd
 
 package lib
 
@@ -1,4 +1,4 @@
-// +build unix linux openbsd darwin,amd64
+//go:build unix || linux || openbsd || (darwin && amd64)
 
 package lib
 
@@ -18,7 +18,7 @@ type FileTime struct {
 
 func timeSpecToTime(ts unix.Timespec) time.Time {
 	// The casts to int64 are needed because on 386, these are int32s.
-	return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+	return time.Unix(ts.Sec, ts.Nsec)
 }
 
 // LoadFileTime returns a FileTime associated with the file.
lib/lib.go (162 changes)
@@ -1,15 +1,22 @@
-// Package lib contains functions useful for most programs.
 package lib
 
 import (
+	"encoding/hex"
 	"fmt"
 	"os"
 	"path/filepath"
+	"strings"
 	"time"
 )
 
 var progname = filepath.Base(os.Args[0])
 
+const (
+	daysInYear        = 365
+	digitWidth        = 10
+	hoursInQuarterDay = 6
+)
+
 // ProgName returns what lib thinks the program name is, namely the
 // basename of argv0.
 //
@@ -20,7 +27,7 @@ func ProgName() string {
 
 // Warnx displays a formatted error message to standard error, à la
 // warnx(3).
-func Warnx(format string, a ...interface{}) (int, error) {
+func Warnx(format string, a ...any) (int, error) {
 	format = fmt.Sprintf("[%s] %s", progname, format)
 	format += "\n"
 	return fmt.Fprintf(os.Stderr, format, a...)
@@ -28,7 +35,7 @@ func Warnx(format string, a ...interface{}) (int, error) {
 
 // Warn displays a formatted error message to standard output,
 // appending the error string, à la warn(3).
-func Warn(err error, format string, a ...interface{}) (int, error) {
+func Warn(err error, format string, a ...any) (int, error) {
 	format = fmt.Sprintf("[%s] %s", progname, format)
 	format += ": %v\n"
 	a = append(a, err)
@@ -37,7 +44,7 @@ func Warn(err error, format string, a ...interface{}) (int, error) {
 
 // Errx displays a formatted error message to standard error and exits
 // with the status code from `exit`, à la errx(3).
-func Errx(exit int, format string, a ...interface{}) {
+func Errx(exit int, format string, a ...any) {
 	format = fmt.Sprintf("[%s] %s", progname, format)
 	format += "\n"
 	fmt.Fprintf(os.Stderr, format, a...)
@@ -47,7 +54,7 @@ func Errx(exit int, format string, a ...interface{}) {
 // Err displays a formatting error message to standard error,
 // appending the error string, and exits with the status code from
 // `exit`, à la err(3).
-func Err(exit int, err error, format string, a ...interface{}) {
+func Err(exit int, err error, format string, a ...any) {
 	format = fmt.Sprintf("[%s] %s", progname, format)
 	format += ": %v\n"
 	a = append(a, err)
@@ -62,30 +69,30 @@ func Itoa(i int, wid int) string {
 	// Assemble decimal in reverse order.
 	var b [20]byte
 	bp := len(b) - 1
-	for i >= 10 || wid > 1 {
+	for i >= digitWidth || wid > 1 {
 		wid--
-		q := i / 10
-		b[bp] = byte('0' + i - q*10)
+		q := i / digitWidth
+		b[bp] = byte('0' + i - q*digitWidth)
 		bp--
 		i = q
 	}
-	// i < 10
+
 	b[bp] = byte('0' + i)
 	return string(b[bp:])
 }
 
 var (
 	dayDuration  = 24 * time.Hour
-	yearDuration = (365 * dayDuration) + (6 * time.Hour)
+	yearDuration = (daysInYear * dayDuration) + (hoursInQuarterDay * time.Hour)
 )
 
 // Duration returns a prettier string for time.Durations.
 func Duration(d time.Duration) string {
 	var s string
 	if d >= yearDuration {
-		years := d / yearDuration
+		years := int64(d / yearDuration)
 		s += fmt.Sprintf("%dy", years)
-		d -= years * yearDuration
+		d -= time.Duration(years) * yearDuration
 	}
 
 	if d >= dayDuration {
@@ -98,8 +105,135 @@ func Duration(d time.Duration) string {
 	}
 
 	d %= 1 * time.Second
-	hours := d / time.Hour
-	d -= hours * time.Hour
+	hours := int64(d / time.Hour)
+	d -= time.Duration(hours) * time.Hour
 	s += fmt.Sprintf("%dh%s", hours, d)
 	return s
 }
+
+type HexEncodeMode uint8
+
+const (
+	// HexEncodeLower prints the bytes as lowercase hexadecimal.
+	HexEncodeLower HexEncodeMode = iota + 1
+	// HexEncodeUpper prints the bytes as uppercase hexadecimal.
+	HexEncodeUpper
+	// HexEncodeLowerColon prints the bytes as lowercase hexadecimal
+	// with colons between each pair of bytes.
+	HexEncodeLowerColon
+	// HexEncodeUpperColon prints the bytes as uppercase hexadecimal
+	// with colons between each pair of bytes.
+	HexEncodeUpperColon
+	// HexEncodeBytes prints the string as a sequence of []byte.
+	HexEncodeBytes
+)
+
+func (m HexEncodeMode) String() string {
+	switch m {
+	case HexEncodeLower:
+		return "lower"
+	case HexEncodeUpper:
+		return "upper"
+	case HexEncodeLowerColon:
+		return "lcolon"
+	case HexEncodeUpperColon:
+		return "ucolon"
+	case HexEncodeBytes:
+		return "bytes"
+	default:
+		panic("invalid hex encode mode")
+	}
+}
+
+func ParseHexEncodeMode(s string) HexEncodeMode {
+	switch strings.ToLower(s) {
+	case "lower":
+		return HexEncodeLower
+	case "upper":
+		return HexEncodeUpper
+	case "lcolon":
+		return HexEncodeLowerColon
+	case "ucolon":
+		return HexEncodeUpperColon
+	case "bytes":
+		return HexEncodeBytes
+	}
+
+	panic("invalid hex encode mode")
+}
+
+func hexColons(s string) string {
+	if len(s)%2 != 0 {
+		fmt.Fprintf(os.Stderr, "hex string: %s\n", s)
+		fmt.Fprintf(os.Stderr, "hex length: %d\n", len(s))
+		panic("invalid hex string length")
+	}
+
+	n := len(s)
+	if n <= 2 {
+		return s
+	}
+
+	pairCount := n / 2
+	if n%2 != 0 {
+		pairCount++
+	}
+
+	var b strings.Builder
+	b.Grow(n + pairCount - 1)
+
+	for i := 0; i < n; i += 2 {
+		b.WriteByte(s[i])
+
+		if i+1 < n {
+			b.WriteByte(s[i+1])
+		}
+
+		if i+2 < n {
+			b.WriteByte(':')
+		}
+	}
+
+	return b.String()
+}
+
+func hexEncode(b []byte) string {
+	s := hex.EncodeToString(b)
+
+	if len(s)%2 != 0 {
+		s = "0" + s
+	}
+
+	return s
+}
+
+func bytesAsByteSliceString(buf []byte) string {
+	sb := &strings.Builder{}
+	sb.WriteString("[]byte{")
+	for i := range buf {
+		fmt.Fprintf(sb, "0x%02x, ", buf[i])
+	}
+	sb.WriteString("}")
+
+	return sb.String()
+}
+
+// HexEncode encodes the given bytes as a hexadecimal string.
+func HexEncode(b []byte, mode HexEncodeMode) string {
+	str := hexEncode(b)
+
+	switch mode {
+	case HexEncodeLower:
+		return str
+	case HexEncodeUpper:
+		return strings.ToUpper(str)
+	case HexEncodeLowerColon:
+		return hexColons(str)
+	case HexEncodeUpperColon:
+		return strings.ToUpper(hexColons(str))
+	case HexEncodeBytes:
+		return bytesAsByteSliceString(b)
+	default:
+		panic("invalid hex encode mode")
+	}
+}
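The expected outputs below follow directly from the mode definitions above, and agree with the tests this change adds in lib/lib_test.go:

```go
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	fp := []byte{0xde, 0xad, 0xbe, 0xef}

	fmt.Println(lib.HexEncode(fp, lib.HexEncodeLower))      // deadbeef
	fmt.Println(lib.HexEncode(fp, lib.HexEncodeUpperColon)) // DE:AD:BE:EF
	fmt.Println(lib.HexEncode(fp, lib.HexEncodeBytes))      // []byte{0xde, 0xad, 0xbe, 0xef, }
}
```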
lib/lib_test.go (new file, 79 lines)
package lib_test

import (
    "testing"

    "git.wntrmute.dev/kyle/goutils/lib"
)

func TestHexEncode_LowerUpper(t *testing.T) {
    b := []byte{0x0f, 0xa1, 0x00, 0xff}

    gotLower := lib.HexEncode(b, lib.HexEncodeLower)
    if gotLower != "0fa100ff" {
        t.Fatalf("lib.HexEncode lower: expected %q, got %q", "0fa100ff", gotLower)
    }

    gotUpper := lib.HexEncode(b, lib.HexEncodeUpper)
    if gotUpper != "0FA100FF" {
        t.Fatalf("lib.HexEncode upper: expected %q, got %q", "0FA100FF", gotUpper)
    }
}

func TestHexEncode_ColonModes(t *testing.T) {
    // Includes leading zero nibble and a zero byte to verify padding and separators
    b := []byte{0x0f, 0xa1, 0x00, 0xff}

    gotLColon := lib.HexEncode(b, lib.HexEncodeLowerColon)
    if gotLColon != "0f:a1:00:ff" {
        t.Fatalf("lib.HexEncode colon lower: expected %q, got %q", "0f:a1:00:ff", gotLColon)
    }

    gotUColon := lib.HexEncode(b, lib.HexEncodeUpperColon)
    if gotUColon != "0F:A1:00:FF" {
        t.Fatalf("lib.HexEncode colon upper: expected %q, got %q", "0F:A1:00:FF", gotUColon)
    }
}

func TestHexEncode_EmptyInput(t *testing.T) {
    var b []byte
    if got := lib.HexEncode(b, lib.HexEncodeLower); got != "" {
        t.Fatalf("empty lower: expected empty string, got %q", got)
    }
    if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "" {
        t.Fatalf("empty upper: expected empty string, got %q", got)
    }
    if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "" {
        t.Fatalf("empty colon lower: expected empty string, got %q", got)
    }
    if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "" {
        t.Fatalf("empty colon upper: expected empty string, got %q", got)
    }
}

func TestHexEncode_SingleByte(t *testing.T) {
    b := []byte{0x0f}
    if got := lib.HexEncode(b, lib.HexEncodeLower); got != "0f" {
        t.Fatalf("single byte lower: expected %q, got %q", "0f", got)
    }
    if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "0F" {
        t.Fatalf("single byte upper: expected %q, got %q", "0F", got)
    }
    // For a single byte, colon modes should not introduce separators
    if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "0f" {
        t.Fatalf("single byte colon lower: expected %q, got %q", "0f", got)
    }
    if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "0F" {
        t.Fatalf("single byte colon upper: expected %q, got %q", "0F", got)
    }
}

func TestHexEncode_InvalidModePanics(t *testing.T) {
    defer func() {
        if r := recover(); r == nil {
            t.Fatalf("expected panic for invalid mode, but function returned normally")
        }
    }()
    // 0 is not a valid lib.HexEncodeMode (valid modes start at 1)
    _ = lib.HexEncode([]byte{0x01}, lib.HexEncodeMode(0))
}
@@ -1,4 +1,4 @@
-// Package syslog is a syslog-type facility for logging.
+// Package log is a syslog-type facility for logging.
 package log
 
 import (
@@ -17,7 +17,7 @@ type logger struct {
 	writeConsole bool
 }
 
-func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}) {
+func (log *logger) printf(p gsyslog.Priority, format string, args ...any) {
 	if !strings.HasSuffix(format, "\n") {
 		format += "\n"
 	}
@@ -28,33 +28,33 @@ func (log *logger) printf(p gsyslog.Priority, format string, args ...interface{}
 	}
 
 	if log.l != nil {
-		log.l.WriteLevel(p, []byte(fmt.Sprintf(format, args...)))
+		_ = log.l.WriteLevel(p, fmt.Appendf(nil, format, args...))
 	}
 }
 
-func (log *logger) print(p gsyslog.Priority, args ...interface{}) {
+func (log *logger) print(p gsyslog.Priority, args ...any) {
 	if p <= log.p && log.writeConsole {
 		fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
 		fmt.Print(args...)
 	}
 
 	if log.l != nil {
-		log.l.WriteLevel(p, []byte(fmt.Sprint(args...)))
+		_ = log.l.WriteLevel(p, fmt.Append(nil, args...))
 	}
 }
 
-func (log *logger) println(p gsyslog.Priority, args ...interface{}) {
+func (log *logger) println(p gsyslog.Priority, args ...any) {
 	if p <= log.p && log.writeConsole {
 		fmt.Printf("%s [%s] ", prioritiev[p], timestamp())
 		fmt.Println(args...)
 	}
 
 	if log.l != nil {
-		log.l.WriteLevel(p, []byte(fmt.Sprintln(args...)))
+		_ = log.l.WriteLevel(p, fmt.Appendln(nil, args...))
 	}
 }
 
-func (log *logger) spew(args ...interface{}) {
+func (log *logger) spew(args ...any) {
 	if log.p == gsyslog.LOG_DEBUG {
 		spew.Dump(args...)
 	}
@@ -160,109 +160,109 @@ func Setup(opts *Options) error {
 	return nil
 }
 
-func Debug(args ...interface{}) {
+func Debug(args ...any) {
 	log.print(gsyslog.LOG_DEBUG, args...)
 }
 
-func Info(args ...interface{}) {
+func Info(args ...any) {
 	log.print(gsyslog.LOG_INFO, args...)
 }
 
-func Notice(args ...interface{}) {
+func Notice(args ...any) {
 	log.print(gsyslog.LOG_NOTICE, args...)
 }
 
-func Warning(args ...interface{}) {
+func Warning(args ...any) {
 	log.print(gsyslog.LOG_WARNING, args...)
 }
 
-func Err(args ...interface{}) {
+func Err(args ...any) {
 	log.print(gsyslog.LOG_ERR, args...)
 }
 
-func Crit(args ...interface{}) {
+func Crit(args ...any) {
 	log.print(gsyslog.LOG_CRIT, args...)
 }
 
-func Alert(args ...interface{}) {
+func Alert(args ...any) {
 	log.print(gsyslog.LOG_ALERT, args...)
 }
 
-func Emerg(args ...interface{}) {
+func Emerg(args ...any) {
 	log.print(gsyslog.LOG_EMERG, args...)
 }
 
-func Debugln(args ...interface{}) {
+func Debugln(args ...any) {
 	log.println(gsyslog.LOG_DEBUG, args...)
 }
 
-func Infoln(args ...interface{}) {
+func Infoln(args ...any) {
 	log.println(gsyslog.LOG_INFO, args...)
 }
 
-func Noticeln(args ...interface{}) {
+func Noticeln(args ...any) {
 	log.println(gsyslog.LOG_NOTICE, args...)
 }
 
-func Warningln(args ...interface{}) {
+func Warningln(args ...any) {
 	log.print(gsyslog.LOG_WARNING, args...)
 }
 
-func Errln(args ...interface{}) {
+func Errln(args ...any) {
 	log.println(gsyslog.LOG_ERR, args...)
 }
 
-func Critln(args ...interface{}) {
+func Critln(args ...any) {
 	log.println(gsyslog.LOG_CRIT, args...)
 }
 
-func Alertln(args ...interface{}) {
+func Alertln(args ...any) {
 	log.println(gsyslog.LOG_ALERT, args...)
 }
 
-func Emergln(args ...interface{}) {
+func Emergln(args ...any) {
 	log.println(gsyslog.LOG_EMERG, args...)
 }
 
-func Debugf(format string, args ...interface{}) {
+func Debugf(format string, args ...any) {
 	log.printf(gsyslog.LOG_DEBUG, format, args...)
 }
 
-func Infof(format string, args ...interface{}) {
+func Infof(format string, args ...any) {
 	log.printf(gsyslog.LOG_INFO, format, args...)
 }
 
-func Noticef(format string, args ...interface{}) {
+func Noticef(format string, args ...any) {
 	log.printf(gsyslog.LOG_NOTICE, format, args...)
 }
 
-func Warningf(format string, args ...interface{}) {
+func Warningf(format string, args ...any) {
 	log.printf(gsyslog.LOG_WARNING, format, args...)
 }
 
-func Errf(format string, args ...interface{}) {
+func Errf(format string, args ...any) {
 	log.printf(gsyslog.LOG_ERR, format, args...)
 }
 
-func Critf(format string, args ...interface{}) {
+func Critf(format string, args ...any) {
 	log.printf(gsyslog.LOG_CRIT, format, args...)
 }
 
-func Alertf(format string, args ...interface{}) {
+func Alertf(format string, args ...any) {
 	log.printf(gsyslog.LOG_ALERT, format, args...)
 }
 
-func Emergf(format string, args ...interface{}) {
+func Emergf(format string, args ...any) {
 	log.printf(gsyslog.LOG_EMERG, format, args...)
 	os.Exit(1)
 }
 
-func Fatal(args ...interface{}) {
+func Fatal(args ...any) {
 	log.println(gsyslog.LOG_ERR, args...)
 	os.Exit(1)
 }
 
-func Fatalf(format string, args ...interface{}) {
+func Fatalf(format string, args ...any) {
 	log.printf(gsyslog.LOG_ERR, format, args...)
 	os.Exit(1)
 }
@@ -279,7 +279,7 @@ func FatalError(err error, message string) {
 }
 
 // Spew will pretty print the args if the logger is set to DEBUG priority.
-func Spew(args ...interface{}) {
+func Spew(args ...any) {
 	log.spew(args...)
 }
 
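The WriteLevel call sites above replace `[]byte(fmt.Sprintf(...))` with `fmt.Appendf(nil, ...)` (and its Append/Appendln siblings), which format straight into a byte slice instead of going through an intermediate string. A standalone, standard-library-only sketch of the equivalence:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	oldBuf := []byte(fmt.Sprintf("user %s logged in from %s\n", "kyle", "10.0.0.5"))
	newBuf := fmt.Appendf(nil, "user %s logged in from %s\n", "kyle", "10.0.0.5")

	fmt.Println(bytes.Equal(oldBuf, newBuf)) // true: identical bytes, one fewer conversion
}
```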
@@ -2,14 +2,13 @@
 // consist of timestamps, an actor and event string, and a mapping of
 // string key-value attribute pairs. For example,
 //
 //	log.Error("serialiser", "failed to open file",
 //		map[string]string{
 //			"error": err.Error(),
 //			"path": "data.bin",
 //		})
 //
 // This produces the output message
 //
 //	[2016-04-01T15:04:30-0700] [ERROR] [actor:serialiser event:failed to open file] error=is a directory path=data.bin
-//
 package logging
Some files were not shown because too many files have changed in this diff.