Compare commits
55 Commits
| SHA1 |
|---|
| ea5ffa4828 |
| aa96e47112 |
| d34a417dce |
| d11e0cf9f9 |
| aad7d68599 |
| 4560868688 |
| 8d5406256f |
| 9280e846fa |
| 0a71661901 |
| 804f53d27d |
| cfb80355bb |
| 77160395a0 |
| 37d5e04421 |
| dc54eeacbc |
| e2a3081ce5 |
| 3149d958f4 |
| f296344acf |
| 3fb2d88a3f |
| 150c02b377 |
| 83f88c49fe |
| 7c437ac45f |
| c999bf35b0 |
| 4dc135cfe0 |
| 790113e189 |
| 0dcd18c6f1 |
| 024d552293 |
| 9cd2ced695 |
| 619c08a13f |
| 944a57bf0e |
| 0857b29624 |
| e95404bfc5 |
| 924654e7c4 |
| 9e0979e07f |
| bbc82ff8de |
| 5fd928f69a |
| acefe4a3b9 |
| a1452cebc9 |
| 6e9812e6f5 |
| 8c34415c34 |
| 2cf2c15def |
| eaad1884d4 |
| 5d57d844d4 |
| 31b9d175dd |
| 79e106da2e |
| 939b1bc272 |
| 89e74f390b |
| 7881b6fdfc |
| 5bef33245f |
| 84250b0501 |
| 459e9f880f |
| 0982f47ce3 |
| 1dec15fd11 |
| 2ee9cae5ba |
| dc04475120 |
| dbbd5116b5 |

.github/workflows/release.yml (new file, +35 lines)

```yaml
name: Release

on:
  push:
    tags:
      - 'v*'
  workflow_dispatch: {}

permissions:
  contents: write

jobs:
  goreleaser:
    name: GoReleaser
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          cache: true

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```

.gitignore (+4 lines)

```diff
@@ -1 +1,5 @@
 .idea
+cmd/cert-bundler/testdata/pkg/*
+# Added by goreleaser init:
+dist/
+cmd/cert-bundler/testdata/bundle/
```

golangci-lint configuration (modified):

```
@@ -12,6 +12,12 @@

version: "2"

output:
  sort-order:
    - file
    - linter
    - severity

issues:
  # Maximum count of issues with the same text.
  # Set to 0 to disable.

@@ -384,6 +390,9 @@ linters:
      - 3
      - 4
      - 8
      - 24
      - 30
      - 365

    nakedret:
      # Make an issue if func has more lines of code than this setting, and it has naked returns.

@@ -454,6 +463,8 @@ linters:
      - -QF1008
      # We often explicitly enable old/deprecated ciphers for research.
      - -SA1019
      # Covered by revive.
      - -ST1003

    usetesting:
      # Enable/disable `os.TempDir()` detections.

@@ -472,6 +483,8 @@ linters:
  rules:
    - path: 'ahash/ahash.go'
      linters: [ staticcheck, gosec ]
    - path: 'twofactor/.*.go'
      linters: [ exhaustive, mnd, revive ]
    - path: 'backoff/backoff_test.go'
      linters: [ testpackage ]
    - path: 'dbg/dbg_test.go'
```

.goreleaser.yaml (new file, +453 lines)
|
||||
# This is an example .goreleaser.yml file with some sensible defaults.
|
||||
# Make sure to check the documentation at https://goreleaser.com
|
||||
|
||||
# The lines below are called `modelines`. See `:help modeline`
|
||||
# Feel free to remove those if you don't want/need to use them.
|
||||
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
|
||||
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
|
||||
|
||||
version: 2
|
||||
|
||||
before:
|
||||
hooks:
|
||||
# You may remove this if you don't use go modules.
|
||||
- go mod tidy
|
||||
# you may remove this if you don't need go generate
|
||||
- go generate ./...
|
||||
|
||||
builds:
|
||||
- id: atping
|
||||
main: ./cmd/atping/main.go
|
||||
binary: atping
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: ca-signed
|
||||
main: ./cmd/ca-signed/main.go
|
||||
binary: ca-signed
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: cert-bundler
|
||||
main: ./cmd/cert-bundler/main.go
|
||||
binary: cert-bundler
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: cert-revcheck
|
||||
main: ./cmd/cert-revcheck/main.go
|
||||
binary: cert-revcheck
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: certchain
|
||||
main: ./cmd/certchain/main.go
|
||||
binary: certchain
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: certdump
|
||||
main: ./cmd/certdump/main.go
|
||||
binary: certdump
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: certexpiry
|
||||
main: ./cmd/certexpiry/main.go
|
||||
binary: certexpiry
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: certser
|
||||
main: ./cmd/certser/main.go
|
||||
binary: certser
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: certverify
|
||||
main: ./cmd/certverify/main.go
|
||||
binary: certverify
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: clustersh
|
||||
main: ./cmd/clustersh/main.go
|
||||
binary: clustersh
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: cruntar
|
||||
main: ./cmd/cruntar/main.go
|
||||
binary: cruntar
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: csrpubdump
|
||||
main: ./cmd/csrpubdump/main.go
|
||||
binary: csrpubdump
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: data_sync
|
||||
main: ./cmd/data_sync/main.go
|
||||
binary: data_sync
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: diskimg
|
||||
main: ./cmd/diskimg/main.go
|
||||
binary: diskimg
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: dumpbytes
|
||||
main: ./cmd/dumpbytes/main.go
|
||||
binary: dumpbytes
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: eig
|
||||
main: ./cmd/eig/main.go
|
||||
binary: eig
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: fragment
|
||||
main: ./cmd/fragment/main.go
|
||||
binary: fragment
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: host
|
||||
main: ./cmd/host/main.go
|
||||
binary: host
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: jlp
|
||||
main: ./cmd/jlp/main.go
|
||||
binary: jlp
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: kgz
|
||||
main: ./cmd/kgz/main.go
|
||||
binary: kgz
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: minmax
|
||||
main: ./cmd/minmax/main.go
|
||||
binary: minmax
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: parts
|
||||
main: ./cmd/parts/main.go
|
||||
binary: parts
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: pem2bin
|
||||
main: ./cmd/pem2bin/main.go
|
||||
binary: pem2bin
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: pembody
|
||||
main: ./cmd/pembody/main.go
|
||||
binary: pembody
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: pemit
|
||||
main: ./cmd/pemit/main.go
|
||||
binary: pemit
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: readchain
|
||||
main: ./cmd/readchain/main.go
|
||||
binary: readchain
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: renfnv
|
||||
main: ./cmd/renfnv/main.go
|
||||
binary: renfnv
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: rhash
|
||||
main: ./cmd/rhash/main.go
|
||||
binary: rhash
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: rolldie
|
||||
main: ./cmd/rolldie/main.go
|
||||
binary: rolldie
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: showimp
|
||||
main: ./cmd/showimp/main.go
|
||||
binary: showimp
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: ski
|
||||
main: ./cmd/ski/main.go
|
||||
binary: ski
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: sprox
|
||||
main: ./cmd/sprox/main.go
|
||||
binary: sprox
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: stealchain
|
||||
main: ./cmd/stealchain/main.go
|
||||
binary: stealchain
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: stealchain-server
|
||||
main: ./cmd/stealchain-server/main.go
|
||||
binary: stealchain-server
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: subjhash
|
||||
main: ./cmd/subjhash/main.go
|
||||
binary: subjhash
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: tlsinfo
|
||||
main: ./cmd/tlsinfo/main.go
|
||||
binary: tlsinfo
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: tlskeypair
|
||||
main: ./cmd/tlskeypair/main.go
|
||||
binary: tlskeypair
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: utc
|
||||
main: ./cmd/utc/main.go
|
||||
binary: utc
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: yamll
|
||||
main: ./cmd/yamll/main.go
|
||||
binary: yamll
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
- id: zsearch
|
||||
main: ./cmd/zsearch/main.go
|
||||
binary: zsearch
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos: [linux, darwin]
|
||||
goarch: [amd64, arm64]
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
|
||||
archives:
|
||||
- formats: [tar.gz]
|
||||
# this name template makes the OS and Arch compatible with the results of `uname`.
|
||||
name_template: >-
|
||||
{{ .ProjectName }}_
|
||||
{{- title .Os }}_
|
||||
{{- if eq .Arch "amd64" }}x86_64
|
||||
{{- else if eq .Arch "386" }}i386
|
||||
{{- else }}{{ .Arch }}{{ end }}
|
||||
{{- if .Arm }}v{{ .Arm }}{{ end }}
|
||||
# use zip for windows archives
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
formats: [zip]
|
||||
|
||||
changelog:
|
||||
sort: asc
|
||||
filters:
|
||||
exclude:
|
||||
- "^docs:"
|
||||
- "^test:"
|
||||
|
||||
gitea_urls:
|
||||
api: https://git.wntrmute.dev/api/v1
|
||||
download: https://git.wntrmute.dev
|
||||
# set to true if you use a self-signed certificate
|
||||
skip_tls_verify: false
|
||||
|
||||
release:
|
||||
footer: >-
|
||||
|
||||
---
|
||||
|
||||
Released by [GoReleaser](https://github.com/goreleaser/goreleaser).
|
||||
CHANGELOG (+71 lines)

```
CHANGELOG

v1.13.3 - 2025-11-18

Added:
- certlib: introduce `Fetcher` for retrieving certificates.
- lib: `HexEncode` gains a byte-slice output variant.
- build: add GoReleaser configuration.

Changed:
- cmd: migrate programs to use `certlib.Fetcher` for certificate retrieval
  (includes `certdump`, `ski`, and others).
- cmd/ski: update display mode.

Misc:
- repository fixups and small cleanups.

v1.13.2 - 2025-11-17

Add:
- certlib/bundler: refactor certificate bundling from cmd/cert-bundler
  into a separate package.

Changed:
- cmd/cert-bundler: refactor to use bundler package, and update Dockerfile.

v1.13.1 - 2025-11-17

Add:
- Dockerfile for cert-bundler.

v1.13.0 - 2025-11-16

Add:
- cmd/certser: print serial numbers for certificates.
- lib/HexEncode: add a new hex encode function handling multiple output
  formats, including with and without colons.

v1.12.4 - 2025-11-16

Changed:

- Linting fixes for twofactor that were previously masked.

v1.12.3 erroneously tagged and pushed

v1.12.2 - 2025-11-16

Changed:

- add rsc.io/qr dependency for twofactor.

v1.12.1 - 2025-11-16

Changed:
- twofactor: Remove go.{mod,sum}.

v1.12.0 - 2025-11-16

Added
- twofactor: the github.com/kisom/twofactor repo has been subtree'd
  into this repo.

v1.11.2 - 2025-11-16

Changed
- cmd/ski, cmd/csrpubdump, cmd/tlskeypair: centralize
  certificate/private-key/CSR parsing by reusing certlib helpers.
  This reduces duplication and improves consistency across commands.
- csr: CSR parsing in the above commands now uses certlib.ParseCSR,
  which verifies CSR signatures (behavioral hardening compared to
  prior parsing without signature verification).

v1.11.1 - 2025-11-16

Changed
```

README.md (modified)

````diff
@@ -2,39 +2,52 @@ GOUTILS
 
 This is a collection of small utility code I've written in Go; the `cmd/`
 directory has a number of command-line utilities. Rather than keep all
-of these in superfluous repositories of their own, or rewriting them
+of these in superfluous repositories of their own or rewriting them
 for each project, I'm putting them here.
 
-The project can be built with the standard Go tooling, or it can be built
-with Bazel.
+The project can be built with the standard Go tooling.
 
 Contents:
 
     ahash/          Provides hashes from string algorithm specifiers.
     assert/         Error handling, assertion-style.
     backoff/        Implementation of an intelligent backoff strategy.
     cache/          Implementations of various caches.
       lru/          Least-recently-used cache.
       mru/          Most-recently-used cache.
     certlib/        Library for working with TLS certificates.
     cmd/
       atping/       Automated TCP ping, meant for putting in cronjobs.
-      certchain/    Display the certificate chain from a
-                    TLS connection.
+      ca-signed/    Validate whether a certificate is signed by a CA.
+      cert-bundler/
+                    Create certificate bundles from a source of PEM
+                    certificates.
+      cert-revcheck/
+                    Check whether a certificate has been revoked or is
+                    expired.
+      certchain/    Display the certificate chain from a TLS connection.
       certdump/     Dump certificate information.
       certexpiry/   Print a list of certificate subjects and expiry times
                     or warn about certificates expiring within a certain
                     window.
-      certverify/   Verify a TLS X.509 certificate, optionally printing
+      certverify/   Verify a TLS X.509 certificate file, optionally printing
                     the time to expiry and checking for revocations.
       clustersh/    Run commands or transfer files across multiple
                     servers via SSH.
-      cruntar/      Untar an archive with hard links, copying instead of
+      cruntar/      (Un)tar an archive with hard links, copying instead of
                     linking.
       csrpubdump/   Dump the public key from an X.509 certificate request.
       data_sync/    Sync the user's homedir to external storage.
       diskimg/      Write a disk image to a device.
       dumpbytes/    Dump the contents of a file as hex bytes, printing it as
                     a Go []byte literal.
       eig/          EEPROM image generator.
       fragment/     Print a fragment of a file.
       host/         Go implementation of the host(1) command.
       jlp/          JSON linter/prettifier.
       kgz/          Custom gzip compressor / decompressor that handles 99%
                     of my use cases.
       minmax/       Generate a minmax code for use in uLisp.
       parts/        Simple parts database management for my collection of
                     electronic components.
       pem2bin/      Dump the binary body of a PEM-encoded block.
@@ -44,41 +57,45 @@ Contents:
                     in a bundle.
       renfnv/       Rename a file to base32-encoded 64-bit FNV-1a hash.
       rhash/        Compute the digest of remote files.
       rolldie/      Roll some dice.
       showimp/      List the external (e.g. non-stdlib and outside the
                     current working directory) imports for a Go file.
       ski           Display the SKI for PEM-encoded TLS material.
       sprox/        Simple TCP proxy.
-      stealchain/   Dump the verified chain from a TLS
-                    connection to a server.
-      stealchain-   Dump the verified chain from a TLS
-      server/       connection from a client.
+      stealchain/   Dump the verified chain from a TLS connection to a
+                    server.
+      stealchain-server/
+                    Dump the verified chain from a TLS connection
+                    from a client.
       subjhash/     Print or match subject info from a certificate.
       tlsinfo/      Print information about a TLS connection (the TLS version
                     and cipher suite).
       tlskeypair/   Check whether a TLS certificate and key file match.
       utc/          Convert times to UTC.
       yamll/        A small YAML linter.
       zsearch/      Search for a string in directory of gzipped files.
     config/         A simple global configuration system where configuration
                     data is pulled from a file or an environment variable
                     transparently.
       iniconf/      A simple INI-style configuration system.
     dbg/            A debug printer.
     die/            Death of a program.
     fileutil/       Common file functions.
     lib/            Commonly-useful functions for writing Go programs.
     log/            A syslog library.
     logging/        A logging library.
     mwc/            MultiwriteCloser implementation.
     rand/           Utilities for working with math/rand.
     sbuf/           A byte buffer that can be wiped.
     seekbuf/        A read-seekable byte buffer.
     syslog/         Syslog-type logging.
     tee/            Emulate tee(1)'s functionality in io.Writers.
     testio/         Various I/O utilities useful during testing.
     testutil/       Various utility functions useful during testing.
 
     twofactor/      Two-factor authentication.
 
 Each program should have a small README in the directory with more
 information.
 
-All code here is licensed under the ISC license.
+All code here is licensed under the Apache 2.0 license.
 
 Error handling
 --------------
@@ -99,7 +116,7 @@ Examples:
 ```
 cert, err := certlib.LoadCertificate(path)
 if err != nil {
-	// sentinel match
+	// sentinel match:
 	if errors.Is(err, certerr.ErrEmptyCertificate) {
 		// handle empty input
 	}
@@ -116,5 +133,3 @@ if err != nil {
 	}
 }
 ```
-
-Avoid including sensitive data (keys, passwords, tokens) in error messages.
````

```diff
@@ -91,7 +91,7 @@ func TestReset(t *testing.T) {
 	}
 }
 
-const decay = 25 * time.Millisecond
+const decay = time.Second
 const maxDuration = 10 * time.Millisecond
 const interval = time.Millisecond
```

certlib/bundler/bundler.go (new file, +677 lines)
|
||||
package bundler
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
)
|
||||
|
||||
const defaultFileMode = 0644
|
||||
|
||||
// Config represents the top-level YAML configuration.
|
||||
type Config struct {
|
||||
Config struct {
|
||||
Hashes string `yaml:"hashes"`
|
||||
Expiry string `yaml:"expiry"`
|
||||
} `yaml:"config"`
|
||||
Chains map[string]ChainGroup `yaml:"chains"`
|
||||
}
|
||||
|
||||
// ChainGroup represents a named group of certificate chains.
|
||||
type ChainGroup struct {
|
||||
Certs []CertChain `yaml:"certs"`
|
||||
Outputs Outputs `yaml:"outputs"`
|
||||
}
|
||||
|
||||
// CertChain represents a root certificate and its intermediates.
|
||||
type CertChain struct {
|
||||
Root string `yaml:"root"`
|
||||
Intermediates []string `yaml:"intermediates"`
|
||||
}
|
||||
|
||||
// Outputs defines output format options.
|
||||
type Outputs struct {
|
||||
IncludeSingle bool `yaml:"include_single"`
|
||||
IncludeIndividual bool `yaml:"include_individual"`
|
||||
Manifest bool `yaml:"manifest"`
|
||||
Formats []string `yaml:"formats"`
|
||||
Encoding string `yaml:"encoding"`
|
||||
}
|
||||
|
||||
var formatExtensions = map[string]string{
|
||||
"zip": ".zip",
|
||||
"tgz": ".tar.gz",
|
||||
}
|
||||
|
||||
// Run performs the bundling operation given a config file path and an output directory.
|
||||
func Run(configFile string, outputDir string) error {
|
||||
if configFile == "" {
|
||||
return errors.New("configuration file required")
|
||||
}
|
||||
|
||||
cfg, err := loadConfig(configFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
expiryDuration := 365 * 24 * time.Hour
|
||||
if cfg.Config.Expiry != "" {
|
||||
expiryDuration, err = parseDuration(cfg.Config.Expiry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing expiry: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(outputDir, 0750); err != nil {
|
||||
return fmt.Errorf("creating output directory: %w", err)
|
||||
}
|
||||
|
||||
totalFormats := 0
|
||||
for _, group := range cfg.Chains {
|
||||
totalFormats += len(group.Outputs.Formats)
|
||||
}
|
||||
createdFiles := make([]string, 0, totalFormats)
|
||||
for groupName, group := range cfg.Chains {
|
||||
files, perr := processChainGroup(groupName, group, expiryDuration, outputDir)
|
||||
if perr != nil {
|
||||
return fmt.Errorf("processing chain group %s: %w", groupName, perr)
|
||||
}
|
||||
createdFiles = append(createdFiles, files...)
|
||||
}
|
||||
|
||||
if cfg.Config.Hashes != "" {
|
||||
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
|
||||
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
|
||||
return fmt.Errorf("generating hash file: %w", gerr)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadConfig(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
|
||||
return nil, uerr
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func parseDuration(s string) (time.Duration, error) {
|
||||
// Support simple formats like "1y", "6m", "30d"
|
||||
if len(s) < 2 {
|
||||
return 0, fmt.Errorf("invalid duration format: %s", s)
|
||||
}
|
||||
|
||||
unit := s[len(s)-1]
|
||||
value := s[:len(s)-1]
|
||||
|
||||
var multiplier time.Duration
|
||||
switch unit {
|
||||
case 'y', 'Y':
|
||||
multiplier = 365 * 24 * time.Hour
|
||||
case 'm', 'M':
|
||||
multiplier = 30 * 24 * time.Hour
|
||||
case 'd', 'D':
|
||||
multiplier = 24 * time.Hour
|
||||
default:
|
||||
return time.ParseDuration(s)
|
||||
}
|
||||
|
||||
var num int
|
||||
_, err := fmt.Sscanf(value, "%d", &num)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid duration value: %s", s)
|
||||
}
|
||||
|
||||
return time.Duration(num) * multiplier, nil
|
||||
}
|
||||
|
||||
func processChainGroup(
|
||||
groupName string,
|
||||
group ChainGroup,
|
||||
expiryDuration time.Duration,
|
||||
outputDir string,
|
||||
) ([]string, error) {
|
||||
// Default encoding to "pem" if not specified
|
||||
encoding := group.Outputs.Encoding
|
||||
if encoding == "" {
|
||||
encoding = "pem"
|
||||
}
|
||||
|
||||
// Collect certificates from all chains in the group
|
||||
singleFileCerts, individualCerts, sourcePaths, err := loadAndCollectCerts(
|
||||
group.Certs,
|
||||
group.Outputs,
|
||||
expiryDuration,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare files for inclusion in archives
|
||||
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, sourcePaths, group.Outputs, encoding)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create archives for the entire group
|
||||
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles, outputDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
|
||||
func loadAndCollectCerts(
|
||||
chains []CertChain,
|
||||
outputs Outputs,
|
||||
expiryDuration time.Duration,
|
||||
) ([]*x509.Certificate, []certWithPath, []string, error) {
|
||||
var singleFileCerts []*x509.Certificate
|
||||
var individualCerts []certWithPath
|
||||
var sourcePaths []string
|
||||
|
||||
for _, chain := range chains {
|
||||
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
|
||||
if cerr != nil {
|
||||
return nil, nil, nil, cerr
|
||||
}
|
||||
if len(s) > 0 {
|
||||
singleFileCerts = append(singleFileCerts, s...)
|
||||
}
|
||||
if len(i) > 0 {
|
||||
individualCerts = append(individualCerts, i...)
|
||||
}
|
||||
// Record source paths for timestamp preservation
|
||||
// Only append when loading succeeded
|
||||
sourcePaths = append(sourcePaths, chain.Root)
|
||||
sourcePaths = append(sourcePaths, chain.Intermediates...)
|
||||
}
|
||||
|
||||
return singleFileCerts, individualCerts, sourcePaths, nil
|
||||
}
|
||||
|
||||
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
|
||||
func collectFromChain(
|
||||
chain CertChain,
|
||||
outputs Outputs,
|
||||
expiryDuration time.Duration,
|
||||
) (
|
||||
[]*x509.Certificate,
|
||||
[]certWithPath,
|
||||
error,
|
||||
) {
|
||||
var single []*x509.Certificate
|
||||
var indiv []certWithPath
|
||||
|
||||
// Load root certificate
|
||||
rootCert, rerr := certlib.LoadCertificate(chain.Root)
|
||||
if rerr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
|
||||
}
|
||||
|
||||
// Check expiry for root
|
||||
checkExpiry(chain.Root, rootCert, expiryDuration)
|
||||
|
||||
// Add root to collections if needed
|
||||
if outputs.IncludeSingle {
|
||||
single = append(single, rootCert)
|
||||
}
|
||||
if outputs.IncludeIndividual {
|
||||
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
|
||||
}
|
||||
|
||||
// Load and validate intermediates
|
||||
for _, intPath := range chain.Intermediates {
|
||||
intCert, lerr := certlib.LoadCertificate(intPath)
|
||||
if lerr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
|
||||
}
|
||||
|
||||
// Validate that intermediate is signed by root
|
||||
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
|
||||
return nil, nil, fmt.Errorf(
|
||||
"intermediate %s is not properly signed by root %s: %w",
|
||||
intPath,
|
||||
chain.Root,
|
||||
sigErr,
|
||||
)
|
||||
}
|
||||
|
||||
// Check expiry for intermediate
|
||||
checkExpiry(intPath, intCert, expiryDuration)
|
||||
|
||||
// Add intermediate to collections if needed
|
||||
if outputs.IncludeSingle {
|
||||
single = append(single, intCert)
|
||||
}
|
||||
if outputs.IncludeIndividual {
|
||||
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
|
||||
}
|
||||
}
|
||||
|
||||
return single, indiv, nil
|
||||
}
|
||||
|
||||
// prepareArchiveFiles prepares all files to be included in archives.
|
||||
func prepareArchiveFiles(
|
||||
singleFileCerts []*x509.Certificate,
|
||||
individualCerts []certWithPath,
|
||||
sourcePaths []string,
|
||||
outputs Outputs,
|
||||
encoding string,
|
||||
) ([]fileEntry, error) {
|
||||
var archiveFiles []fileEntry
|
||||
|
||||
// Track used filenames to avoid collisions inside archives
|
||||
usedNames := make(map[string]int)
|
||||
|
||||
// Handle a single bundle file
|
||||
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
|
||||
bundleTime := maxModTime(sourcePaths)
|
||||
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
|
||||
}
|
||||
for i := range files {
|
||||
files[i].name = makeUniqueName(files[i].name, usedNames)
|
||||
files[i].modTime = bundleTime
|
||||
// Best-effort: we do not have a portable birth/creation time.
|
||||
// Use the same timestamp for created time to track deterministically.
|
||||
files[i].createTime = bundleTime
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
|
||||
// Handle individual files
|
||||
if outputs.IncludeIndividual {
|
||||
for _, cp := range individualCerts {
|
||||
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
|
||||
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
|
||||
}
|
||||
mt := fileModTime(cp.path)
|
||||
for i := range files {
|
||||
files[i].name = makeUniqueName(files[i].name, usedNames)
|
||||
files[i].modTime = mt
|
||||
files[i].createTime = mt
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate manifest if requested
|
||||
if outputs.Manifest {
|
||||
manifestContent := generateManifest(archiveFiles)
|
||||
manifestName := makeUniqueName("MANIFEST", usedNames)
|
||||
mt := maxModTime(sourcePaths)
|
||||
archiveFiles = append(archiveFiles, fileEntry{
|
||||
name: manifestName,
|
||||
content: manifestContent,
|
||||
modTime: mt,
|
||||
createTime: mt,
|
||||
})
|
||||
}
|
||||
|
||||
return archiveFiles, nil
|
||||
}
|
||||
|
||||
// createArchiveFiles creates archive files in the specified formats.
|
||||
func createArchiveFiles(
|
||||
groupName string,
|
||||
formats []string,
|
||||
archiveFiles []fileEntry,
|
||||
outputDir string,
|
||||
) ([]string, error) {
|
||||
createdFiles := make([]string, 0, len(formats))
|
||||
|
||||
for _, format := range formats {
|
||||
ext, ok := formatExtensions[format]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
archivePath := filepath.Join(outputDir, groupName+ext)
|
||||
switch format {
|
||||
case "zip":
|
||||
if err := createZipArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create zip archive: %w", err)
|
||||
}
|
||||
case "tgz":
|
||||
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
createdFiles = append(createdFiles, archivePath)
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
|
||||
now := time.Now()
|
||||
expiryThreshold := now.Add(expiryDuration)
|
||||
|
||||
if cert.NotAfter.Before(expiryThreshold) {
|
||||
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
|
||||
if daysUntilExpiry < 0 {
|
||||
fmt.Fprintf(
|
||||
os.Stderr,
|
||||
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
|
||||
path,
|
||||
-daysUntilExpiry,
|
||||
)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fileEntry struct {
|
||||
name string
|
||||
content []byte
|
||||
modTime time.Time
|
||||
createTime time.Time
|
||||
}
|
||||
|
||||
type certWithPath struct {
|
||||
cert *x509.Certificate
|
||||
path string
|
||||
}
|
||||
|
||||
// encodeCertsToFiles converts certificates to file entries based on encoding type
|
||||
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
|
||||
func encodeCertsToFiles(
|
||||
certs []*x509.Certificate,
|
||||
baseName string,
|
||||
encoding string,
|
||||
isSingle bool,
|
||||
) ([]fileEntry, error) {
|
||||
var files []fileEntry
|
||||
|
||||
switch encoding {
|
||||
case "pem":
|
||||
pemContent := encodeCertsToPEM(certs)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
case "der":
|
||||
if isSingle {
|
||||
// For single file in DER, concatenate all cert DER bytes
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else if len(certs) > 0 {
|
||||
// Individual DER file (should only have one cert)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
case "both":
|
||||
// Add PEM version
|
||||
pemContent := encodeCertsToPEM(certs)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
// Add DER version
|
||||
if isSingle {
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else if len(certs) > 0 {
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// encodeCertsToPEM encodes certificates to PEM format.
|
||||
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
|
||||
var pemContent []byte
|
||||
for _, cert := range certs {
|
||||
pemBlock := &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
}
|
||||
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
|
||||
}
|
||||
return pemContent
|
||||
}
|
||||
|
||||
func generateManifest(files []fileEntry) []byte {
|
||||
// Build a sorted list of files by filename to ensure deterministic manifest ordering
|
||||
sorted := make([]fileEntry, 0, len(files))
|
||||
for _, f := range files {
|
||||
// Defensive: skip any existing manifest entry
|
||||
if f.name == "MANIFEST" {
|
||||
continue
|
||||
}
|
||||
sorted = append(sorted, f)
|
||||
}
|
||||
sort.Slice(sorted, func(i, j int) bool { return sorted[i].name < sorted[j].name })
|
||||
|
||||
var manifest strings.Builder
|
||||
for _, file := range sorted {
|
||||
hash := sha256.Sum256(file.content)
|
||||
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
|
||||
}
|
||||
return []byte(manifest.String())
|
||||
}
|
||||
|
||||
// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
|
||||
func closeWithErr(baseErr error, closers ...io.Closer) error {
|
||||
for _, c := range closers {
|
||||
if c == nil {
|
||||
continue
|
||||
}
|
||||
if cerr := c.Close(); cerr != nil {
|
||||
baseErr = errors.Join(baseErr, cerr)
|
||||
}
|
||||
}
|
||||
return baseErr
|
||||
}
|
||||
|
||||
func createZipArchive(path string, files []fileEntry) error {
|
||||
f, zerr := os.Create(path)
|
||||
if zerr != nil {
|
||||
return zerr
|
||||
}
|
||||
|
||||
w := zip.NewWriter(f)
|
||||
|
||||
for _, file := range files {
|
||||
hdr := &zip.FileHeader{
|
||||
Name: file.name,
|
||||
Method: zip.Deflate,
|
||||
}
|
||||
if !file.modTime.IsZero() {
|
||||
hdr.SetModTime(file.modTime)
|
||||
}
|
||||
fw, werr := w.CreateHeader(hdr)
|
||||
if werr != nil {
|
||||
return closeWithErr(werr, w, f)
|
||||
}
|
||||
if _, werr = fw.Write(file.content); werr != nil {
|
||||
return closeWithErr(werr, w, f)
|
||||
}
|
||||
}
|
||||
|
||||
// Check errors on close operations
|
||||
if cerr := w.Close(); cerr != nil {
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func createTarGzArchive(path string, files []fileEntry) error {
|
||||
f, terr := os.Create(path)
|
||||
if terr != nil {
|
||||
return terr
|
||||
}
|
||||
|
||||
gw := gzip.NewWriter(f)
|
||||
tw := tar.NewWriter(gw)
|
||||
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.name,
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Mode: defaultFileMode,
|
||||
Size: int64(len(file.content)),
|
||||
ModTime: func() time.Time {
|
||||
if file.modTime.IsZero() {
|
||||
return time.Now()
|
||||
}
|
||||
return file.modTime
|
||||
}(),
|
||||
}
|
||||
// Set additional times if supported
|
||||
hdr.AccessTime = hdr.ModTime
|
||||
if !file.createTime.IsZero() {
|
||||
hdr.ChangeTime = file.createTime
|
||||
} else {
|
||||
hdr.ChangeTime = hdr.ModTime
|
||||
}
|
||||
if herr := tw.WriteHeader(hdr); herr != nil {
|
||||
return closeWithErr(herr, tw, gw, f)
|
||||
}
|
||||
if _, werr := tw.Write(file.content); werr != nil {
|
||||
return closeWithErr(werr, tw, gw, f)
|
||||
}
|
||||
}
|
||||
|
||||
// Check errors on close operations in the correct order
|
||||
if cerr := tw.Close(); cerr != nil {
|
||||
_ = gw.Close()
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
if cerr := gw.Close(); cerr != nil {
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func generateHashFile(path string, files []string) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for _, file := range files {
|
||||
data, rerr := os.ReadFile(file)
|
||||
if rerr != nil {
|
||||
return rerr
|
||||
}
|
||||
|
||||
hash := sha256.Sum256(data)
|
||||
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeUniqueName ensures that each file name within the archive is unique by appending
|
||||
// an incremental numeric suffix before the extension when collisions occur.
|
||||
// Example: "root.pem" -> "root-2.pem", "root-3.pem", etc.
|
||||
func makeUniqueName(name string, used map[string]int) string {
|
||||
// If unused, mark and return as-is
|
||||
if _, ok := used[name]; !ok {
|
||||
used[name] = 1
|
||||
return name
|
||||
}
|
||||
|
||||
ext := filepath.Ext(name)
|
||||
base := strings.TrimSuffix(name, ext)
|
||||
// Track a counter per base+ext key
|
||||
key := base + ext
|
||||
counter := max(used[key], 1)
|
||||
for {
|
||||
counter++
|
||||
candidate := fmt.Sprintf("%s-%d%s", base, counter, ext)
|
||||
if _, exists := used[candidate]; !exists {
|
||||
used[key] = counter
|
||||
used[candidate] = 1
|
||||
return candidate
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fileModTime returns the file's modification time, or time.Now() if stat fails.
|
||||
func fileModTime(path string) time.Time {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return time.Now()
|
||||
}
|
||||
return fi.ModTime()
|
||||
}
|
||||
|
||||
// maxModTime returns the latest modification time across provided paths.
|
||||
// If the list is empty or stats fail, returns time.Now().
|
||||
func maxModTime(paths []string) time.Time {
|
||||
var zero time.Time
|
||||
maxTime := zero
|
||||
for _, p := range paths {
|
||||
fi, err := os.Stat(p)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
mt := fi.ModTime()
|
||||
if maxTime.IsZero() || mt.After(maxTime) {
|
||||
maxTime = mt
|
||||
}
|
||||
}
|
||||
if maxTime.IsZero() {
|
||||
return time.Now()
|
||||
}
|
||||
return maxTime
|
||||
}
|
||||
certlib/fetch.go (new file, +175 lines)
|
||||
package certlib
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/hosts"
|
||||
"git.wntrmute.dev/kyle/goutils/fileutil"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
// FetcherOpts are options for fetching certificates. They are only applicable to ServerFetcher.
|
||||
type FetcherOpts struct {
|
||||
SkipVerify bool
|
||||
Roots *x509.CertPool
|
||||
}
|
||||
|
||||
// Fetcher is an interface for fetching certificates from a remote source. It
|
||||
// currently supports fetching from a server or a file.
|
||||
type Fetcher interface {
|
||||
Get() (*x509.Certificate, error)
|
||||
GetChain() ([]*x509.Certificate, error)
|
||||
String() string
|
||||
}
|
||||
|
||||
type ServerFetcher struct {
|
||||
host string
|
||||
port int
|
||||
insecure bool
|
||||
roots *x509.CertPool
|
||||
}
|
||||
|
||||
// WithRoots sets the roots for the ServerFetcher.
|
||||
func WithRoots(roots *x509.CertPool) func(*ServerFetcher) {
|
||||
return func(sf *ServerFetcher) {
|
||||
sf.roots = roots
|
||||
}
|
||||
}
|
||||
|
||||
// WithSkipVerify sets the insecure flag for the ServerFetcher.
|
||||
func WithSkipVerify() func(*ServerFetcher) {
|
||||
return func(sf *ServerFetcher) {
|
||||
sf.insecure = true
|
||||
}
|
||||
}
|
||||
|
||||
// ParseServer parses a server string into a ServerFetcher. It can be a URL or a
|
||||
// a host:port pair.
|
||||
func ParseServer(host string) (*ServerFetcher, error) {
|
||||
target, err := hosts.ParseHost(host)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse server: %w", err)
|
||||
}
|
||||
|
||||
return &ServerFetcher{
|
||||
host: target.Host,
|
||||
port: target.Port,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sf *ServerFetcher) String() string {
|
||||
return fmt.Sprintf("tls://%s", net.JoinHostPort(sf.host, lib.Itoa(sf.port, -1)))
|
||||
}
|
||||
|
||||
func (sf *ServerFetcher) GetChain() ([]*x509.Certificate, error) {
|
||||
config := &tls.Config{
|
||||
InsecureSkipVerify: sf.insecure, // #nosec G402 - no shit sherlock
|
||||
RootCAs: sf.roots,
|
||||
}
|
||||
|
||||
dialer := &tls.Dialer{
|
||||
Config: config,
|
||||
}
|
||||
|
||||
hostSpec := net.JoinHostPort(sf.host, lib.Itoa(sf.port, -1))
|
||||
|
||||
netConn, err := dialer.DialContext(context.Background(), "tcp", hostSpec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing server: %w", err)
|
||||
}
|
||||
|
||||
conn, ok := netConn.(*tls.Conn)
|
||||
if !ok {
|
||||
return nil, errors.New("connection is not TLS")
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
state := conn.ConnectionState()
|
||||
return state.PeerCertificates, nil
|
||||
}
|
||||
|
||||
func (sf *ServerFetcher) Get() (*x509.Certificate, error) {
|
||||
certs, err := sf.GetChain()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return certs[0], nil
|
||||
}
|
||||
|
||||
type FileFetcher struct {
|
||||
path string
|
||||
}
|
||||
|
||||
func NewFileFetcher(path string) *FileFetcher {
|
||||
return &FileFetcher{
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
func (ff *FileFetcher) String() string {
|
||||
return ff.path
|
||||
}
|
||||
|
||||
func (ff *FileFetcher) GetChain() ([]*x509.Certificate, error) {
|
||||
if ff.path == "-" {
|
||||
certData, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read from stdin: %w", err)
|
||||
}
|
||||
|
||||
return ParseCertificatesPEM(certData)
|
||||
}
|
||||
|
||||
certs, err := LoadCertificates(ff.path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load chain: %w", err)
|
||||
}
|
||||
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
func (ff *FileFetcher) Get() (*x509.Certificate, error) {
|
||||
certs, err := ff.GetChain()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return certs[0], nil
|
||||
}
|
||||
|
||||
// GetCertificateChain fetches a certificate chain from a remote source.
|
||||
func GetCertificateChain(spec string, opts *FetcherOpts) ([]*x509.Certificate, error) {
|
||||
if fileutil.FileDoesExist(spec) {
|
||||
return NewFileFetcher(spec).GetChain()
|
||||
}
|
||||
|
||||
fetcher, err := ParseServer(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
fetcher.insecure = opts.SkipVerify
|
||||
fetcher.roots = opts.Roots
|
||||
}
|
||||
|
||||
return fetcher.GetChain()
|
||||
}
|
||||
|
||||
// GetCertificate fetches the first certificate from a certificate chain.
|
||||
func GetCertificate(spec string, opts *FetcherOpts) (*x509.Certificate, error) {
|
||||
certs, err := GetCertificateChain(spec, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return certs[0], nil
|
||||
}
|
||||
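
A short usage sketch for the `Fetcher` interface defined in this file; the certificate path and server spec below are placeholders, not values from the repository:

```go
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

// leafSubject returns the leaf certificate's subject from any Fetcher,
// whether it reads from a file or dials a TLS server.
func leafSubject(f certlib.Fetcher) (string, error) {
	cert, err := f.Get()
	if err != nil {
		return "", fmt.Errorf("fetching %s: %w", f.String(), err)
	}
	return cert.Subject.String(), nil
}

func main() {
	fetchers := []certlib.Fetcher{
		certlib.NewFileFetcher("cert.pem"), // placeholder path
	}

	// ParseServer accepts host:port pairs as well as https:// and tls:// URLs.
	if sf, err := certlib.ParseServer("https://example.org"); err == nil {
		fetchers = append(fetchers, sf)
	}

	for _, f := range fetchers {
		subject, err := leafSubject(f)
		if err != nil {
			log.Println(err)
			continue
		}
		fmt.Printf("%s: %s\n", f.String(), subject)
	}
}
```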
```diff
@@ -396,6 +396,45 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
 	return certs, rest, nil
 }
 
+// LoadFullCertPool returns a certificate pool with roots and intermediates
+// from disk. If no roots are provided, the system root pool will be used.
+func LoadFullCertPool(roots, intermediates string) (*x509.CertPool, error) {
+	var err error
+
+	pool := x509.NewCertPool()
+
+	if roots == "" {
+		pool, err = x509.SystemCertPool()
+		if err != nil {
+			return nil, fmt.Errorf("loading system cert pool: %w", err)
+		}
+	} else {
+		var rootCerts []*x509.Certificate
+		rootCerts, err = LoadCertificates(roots)
+		if err != nil {
+			return nil, fmt.Errorf("loading roots: %w", err)
+		}
+
+		for _, cert := range rootCerts {
+			pool.AddCert(cert)
+		}
+	}
+
+	if intermediates != "" {
+		var intCerts []*x509.Certificate
+		intCerts, err = LoadCertificates(intermediates)
+		if err != nil {
+			return nil, fmt.Errorf("loading intermediates: %w", err)
+		}
+
+		for _, cert := range intCerts {
+			pool.AddCert(cert)
+		}
+	}
+
+	return pool, nil
+}
+
 // LoadPEMCertPool loads a pool of PEM certificates from file.
 func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
 	if certsFile == "" {
```

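A minimal sketch of how these pieces could fit together, combining `LoadFullCertPool` with the `FetcherOpts` from `certlib/fetch.go`; the file paths and server address are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	// Hypothetical paths: a private CA bundle and its intermediates.
	pool, err := certlib.LoadFullCertPool("testdata/roots.pem", "testdata/ints.pem")
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the peer chain from a server, verifying against the custom pool.
	certs, err := certlib.GetCertificateChain("tls://example.org:8443",
		&certlib.FetcherOpts{Roots: pool})
	if err != nil {
		log.Fatal(err)
	}

	for _, cert := range certs {
		fmt.Println(cert.Subject.CommonName)
	}
}
```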
```diff
@@ -26,7 +26,12 @@ func parseURL(host string) (string, int, error) {
 		return "", 0, fmt.Errorf("certlib/hosts: invalid host: %s", host)
 	}
 
-	if strings.ToLower(url.Scheme) != "https" {
+	switch strings.ToLower(url.Scheme) {
+	case "https":
+		// OK
+	case "tls":
+		// OK
+	default:
 		return "", 0, errors.New("certlib/hosts: only https scheme supported")
 	}
@@ -43,28 +48,28 @@ func parseURL(host string) (string, int, error) {
 }
 
 func parseHostPort(host string) (string, int, error) {
-	host, sport, err := net.SplitHostPort(host)
+	shost, sport, err := net.SplitHostPort(host)
 	if err == nil {
 		portInt, err2 := strconv.ParseInt(sport, 10, 16)
 		if err2 != nil {
 			return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", sport)
 		}
 
-		return host, int(portInt), nil
+		return shost, int(portInt), nil
 	}
 
 	return host, defaultHTTPSPort, nil
 }
 
 func ParseHost(host string) (*Target, error) {
-	host, port, err := parseURL(host)
+	uhost, port, err := parseURL(host)
 	if err == nil {
-		return &Target{Host: host, Port: port}, nil
+		return &Target{Host: uhost, Port: port}, nil
 	}
 
-	host, port, err = parseHostPort(host)
+	shost, port, err := parseHostPort(host)
 	if err == nil {
-		return &Target{Host: host, Port: port}, nil
+		return &Target{Host: shost, Port: port}, nil
 	}
 
 	return nil, fmt.Errorf("certlib/hosts: invalid host: %s", host)
```

certlib/hosts/hosts_test.go (new file, +35 lines)

```go
package hosts_test

import (
	"testing"

	"git.wntrmute.dev/kyle/goutils/certlib/hosts"
)

type testCase struct {
	Host   string
	Target hosts.Target
}

var testCases = []testCase{
	{Host: "server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
	{Host: "server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
	{Host: "tls://server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
	{Host: "https://server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
	{Host: "https://server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
	{Host: "tls://server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
	{Host: "https://server-name/something/else", Target: hosts.Target{Host: "server-name", Port: 443}},
}

func TestParseHost(t *testing.T) {
	for i, tc := range testCases {
		target, err := hosts.ParseHost(tc.Host)
		if err != nil {
			t.Fatalf("test case %d: %s", i+1, err)
		}

		if target.Host != tc.Target.Host {
			t.Fatalf("test case %d: got host '%s', want host '%s'", i+1, target.Host, tc.Target.Host)
		}
	}
}
```

cmd/cert-bundler/Dockerfile (new file, +28 lines)

```dockerfile
# Build and runtime image for cert-bundler
# Usage (from repo root or cmd/cert-bundler directory):
#   docker build -t cert-bundler:latest -f cmd/cert-bundler/Dockerfile .
#   docker run --rm -v "$PWD":/work cert-bundler:latest
# This expects a /work/bundle.yaml file in the mounted directory and
# will write generated bundles to /work/bundle.

# Build stage
FROM golang:1.24.3-alpine AS build
WORKDIR /src

# Copy go module files and download dependencies first for better caching
RUN go install git.wntrmute.dev/kyle/goutils/cmd/cert-bundler@v1.13.2 && \
    mv /go/bin/cert-bundler /usr/local/bin/cert-bundler

# Runtime stage (kept as golang:alpine per requirement)
FROM golang:1.24.3-alpine

# Create a work directory that users will typically mount into
WORKDIR /work
VOLUME ["/work"]

# Copy the built binary from the builder stage
COPY --from=build /usr/local/bin/cert-bundler /usr/local/bin/cert-bundler

# Default command: read bundle.yaml from current directory and output to ./bundle
ENTRYPOINT ["/usr/local/bin/cert-bundler"]
CMD ["-c", "/work/bundle.yaml", "-o", "/work/bundle"]
```
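
For reference, a hypothetical `bundle.yaml` matching the fields the bundler package reads (`config.hashes`, `config.expiry`, and per-group `certs`/`outputs`); the group name and certificate paths are placeholders:

```yaml
config:
  hashes: bundle.sha256
  expiry: 1y

chains:
  example-ca:
    certs:
      - root: pems/root.pem
        intermediates:
          - pems/intermediate.pem
    outputs:
      include_single: true
      include_individual: true
      manifest: true
      encoding: pem
      formats:
        - zip
        - tgz
```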
@@ -1,66 +1,19 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
_ "embed"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"gopkg.in/yaml.v2"
|
||||
"git.wntrmute.dev/kyle/goutils/certlib/bundler"
|
||||
)
|
||||
|
||||
// Config represents the top-level YAML configuration.
|
||||
type Config struct {
|
||||
Config struct {
|
||||
Hashes string `yaml:"hashes"`
|
||||
Expiry string `yaml:"expiry"`
|
||||
} `yaml:"config"`
|
||||
Chains map[string]ChainGroup `yaml:"chains"`
|
||||
}
|
||||
|
||||
// ChainGroup represents a named group of certificate chains.
|
||||
type ChainGroup struct {
|
||||
Certs []CertChain `yaml:"certs"`
|
||||
Outputs Outputs `yaml:"outputs"`
|
||||
}
|
||||
|
||||
// CertChain represents a root certificate and its intermediates.
|
||||
type CertChain struct {
|
||||
Root string `yaml:"root"`
|
||||
Intermediates []string `yaml:"intermediates"`
|
||||
}
|
||||
|
||||
// Outputs defines output format options.
|
||||
type Outputs struct {
|
||||
IncludeSingle bool `yaml:"include_single"`
|
||||
IncludeIndividual bool `yaml:"include_individual"`
|
||||
Manifest bool `yaml:"manifest"`
|
||||
Formats []string `yaml:"formats"`
|
||||
Encoding string `yaml:"encoding"`
|
||||
}
|
||||
|
||||
var (
|
||||
configFile string
|
||||
outputDir string
|
||||
)
|
||||
|
||||
var formatExtensions = map[string]string{
|
||||
"zip": ".zip",
|
||||
"tgz": ".tar.gz",
|
||||
}
|
||||
|
||||
//go:embed README.txt
|
||||
var readmeContent string
|
||||
|
||||
@@ -79,497 +32,10 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Load and parse configuration
|
||||
cfg, err := loadConfig(configFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error loading config: %v\n", err)
|
||||
if err := bundler.Run(configFile, outputDir); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Parse expiry duration (default 1 year)
|
||||
expiryDuration := 365 * 24 * time.Hour
|
||||
if cfg.Config.Expiry != "" {
|
||||
expiryDuration, err = parseDuration(cfg.Config.Expiry)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error parsing expiry: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Create output directory if it doesn't exist
|
||||
err = os.MkdirAll(outputDir, 0750)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Process each chain group
|
||||
// Pre-allocate createdFiles based on total number of formats across all groups
|
||||
totalFormats := 0
|
||||
for _, group := range cfg.Chains {
|
||||
totalFormats += len(group.Outputs.Formats)
|
||||
}
|
||||
createdFiles := make([]string, 0, totalFormats)
|
||||
for groupName, group := range cfg.Chains {
|
||||
files, perr := processChainGroup(groupName, group, expiryDuration)
|
||||
if perr != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, perr)
|
||||
os.Exit(1)
|
||||
}
|
||||
createdFiles = append(createdFiles, files...)
|
||||
}
|
||||
|
||||
// Generate hash file for all created archives
|
||||
if cfg.Config.Hashes != "" {
|
||||
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
|
||||
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", gerr)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("Certificate bundling completed successfully")
|
||||
}
|
||||
|
||||
func loadConfig(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
|
||||
return nil, uerr
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func parseDuration(s string) (time.Duration, error) {
|
||||
// Support simple formats like "1y", "6m", "30d"
|
||||
if len(s) < 2 {
|
||||
return 0, fmt.Errorf("invalid duration format: %s", s)
|
||||
}
|
||||
|
||||
unit := s[len(s)-1]
|
||||
value := s[:len(s)-1]
|
||||
|
||||
var multiplier time.Duration
|
||||
switch unit {
|
||||
case 'y', 'Y':
|
||||
multiplier = 365 * 24 * time.Hour
|
||||
case 'm', 'M':
|
||||
multiplier = 30 * 24 * time.Hour
|
||||
case 'd', 'D':
|
||||
multiplier = 24 * time.Hour
|
||||
default:
|
||||
return time.ParseDuration(s)
|
||||
}
|
||||
|
||||
var num int
|
||||
_, err := fmt.Sscanf(value, "%d", &num)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid duration value: %s", s)
|
||||
}
|
||||
|
||||
return time.Duration(num) * multiplier, nil
|
||||
}
|
||||
|
||||
func processChainGroup(groupName string, group ChainGroup, expiryDuration time.Duration) ([]string, error) {
|
||||
// Default encoding to "pem" if not specified
|
||||
encoding := group.Outputs.Encoding
|
||||
if encoding == "" {
|
||||
encoding = "pem"
|
||||
}
|
||||
|
||||
// Collect certificates from all chains in the group
|
||||
singleFileCerts, individualCerts, err := loadAndCollectCerts(group.Certs, group.Outputs, expiryDuration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare files for inclusion in archives
|
||||
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, group.Outputs, encoding)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create archives for the entire group
|
||||
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
|
||||
func loadAndCollectCerts(
|
||||
chains []CertChain,
|
||||
outputs Outputs,
|
||||
expiryDuration time.Duration,
|
||||
) ([]*x509.Certificate, []certWithPath, error) {
|
||||
var singleFileCerts []*x509.Certificate
|
||||
var individualCerts []certWithPath
|
||||
|
||||
for _, chain := range chains {
|
||||
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
|
||||
if cerr != nil {
|
||||
return nil, nil, cerr
|
||||
}
|
||||
if len(s) > 0 {
|
||||
singleFileCerts = append(singleFileCerts, s...)
|
||||
}
|
||||
if len(i) > 0 {
|
||||
individualCerts = append(individualCerts, i...)
|
||||
}
|
||||
}
|
||||
|
||||
return singleFileCerts, individualCerts, nil
|
||||
}
|
||||
|
||||
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
|
||||
func collectFromChain(
|
||||
chain CertChain,
|
||||
outputs Outputs,
|
||||
expiryDuration time.Duration,
|
||||
) (
|
||||
[]*x509.Certificate,
|
||||
[]certWithPath,
|
||||
error,
|
||||
) {
|
||||
var single []*x509.Certificate
|
||||
var indiv []certWithPath
|
||||
|
||||
// Load root certificate
|
||||
rootCert, rerr := certlib.LoadCertificate(chain.Root)
|
||||
if rerr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
|
||||
}
|
||||
|
||||
// Check expiry for root
|
||||
checkExpiry(chain.Root, rootCert, expiryDuration)
|
||||
|
||||
// Add root to collections if needed
|
||||
if outputs.IncludeSingle {
|
||||
single = append(single, rootCert)
|
||||
}
|
||||
if outputs.IncludeIndividual {
|
||||
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
|
||||
}
|
||||
|
||||
// Load and validate intermediates
|
||||
for _, intPath := range chain.Intermediates {
|
||||
intCert, lerr := certlib.LoadCertificate(intPath)
|
||||
if lerr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
|
||||
}
|
||||
|
||||
// Validate that intermediate is signed by root
|
||||
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
|
||||
return nil, nil, fmt.Errorf(
|
||||
"intermediate %s is not properly signed by root %s: %w",
|
||||
intPath,
|
||||
chain.Root,
|
||||
sigErr,
|
||||
)
|
||||
}
|
||||
|
||||
// Check expiry for intermediate
|
||||
checkExpiry(intPath, intCert, expiryDuration)
|
||||
|
||||
// Add intermediate to collections if needed
|
||||
if outputs.IncludeSingle {
|
||||
single = append(single, intCert)
|
||||
}
|
||||
if outputs.IncludeIndividual {
|
||||
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
|
||||
}
|
||||
}
|
||||
|
||||
return single, indiv, nil
|
||||
}
|
||||
|
||||
// prepareArchiveFiles prepares all files to be included in archives.
|
||||
func prepareArchiveFiles(
|
||||
singleFileCerts []*x509.Certificate,
|
||||
individualCerts []certWithPath,
|
||||
outputs Outputs,
|
||||
encoding string,
|
||||
) ([]fileEntry, error) {
|
||||
var archiveFiles []fileEntry
|
||||
|
||||
// Handle a single bundle file
|
||||
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
|
||||
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
|
||||
// Handle individual files
|
||||
if outputs.IncludeIndividual {
|
||||
for _, cp := range individualCerts {
|
||||
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
|
||||
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
|
||||
}
|
||||
archiveFiles = append(archiveFiles, files...)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate manifest if requested
|
||||
if outputs.Manifest {
|
||||
manifestContent := generateManifest(archiveFiles)
|
||||
archiveFiles = append(archiveFiles, fileEntry{
|
||||
name: "MANIFEST",
|
||||
content: manifestContent,
|
||||
})
|
||||
}
|
||||
|
||||
return archiveFiles, nil
|
||||
}
|
||||
|
||||
// createArchiveFiles creates archive files in the specified formats.
|
||||
func createArchiveFiles(groupName string, formats []string, archiveFiles []fileEntry) ([]string, error) {
|
||||
createdFiles := make([]string, 0, len(formats))
|
||||
|
||||
for _, format := range formats {
|
||||
ext, ok := formatExtensions[format]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
archivePath := filepath.Join(outputDir, groupName+ext)
|
||||
switch format {
|
||||
case "zip":
|
||||
if err := createZipArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create zip archive: %w", err)
|
||||
}
|
||||
case "tgz":
|
||||
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
|
||||
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
createdFiles = append(createdFiles, archivePath)
|
||||
}
|
||||
|
||||
return createdFiles, nil
|
||||
}
|
||||
|
||||
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
|
||||
now := time.Now()
|
||||
expiryThreshold := now.Add(expiryDuration)
|
||||
|
||||
if cert.NotAfter.Before(expiryThreshold) {
|
||||
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
|
||||
if daysUntilExpiry < 0 {
|
||||
fmt.Fprintf(
|
||||
os.Stderr,
|
||||
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
|
||||
path,
|
||||
-daysUntilExpiry,
|
||||
)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fileEntry struct {
|
||||
name string
|
||||
content []byte
|
||||
}
|
||||
|
||||
type certWithPath struct {
|
||||
cert *x509.Certificate
|
||||
path string
|
||||
}
|
||||
|
||||
// encodeCertsToFiles converts certificates to file entries based on encoding type
|
||||
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
|
||||
func encodeCertsToFiles(
|
||||
certs []*x509.Certificate,
|
||||
baseName string,
|
||||
encoding string,
|
||||
isSingle bool,
|
||||
) ([]fileEntry, error) {
|
||||
var files []fileEntry
|
||||
|
||||
switch encoding {
|
||||
case "pem":
|
||||
pemContent := encodeCertsToPEM(certs)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
case "der":
|
||||
if isSingle {
|
||||
// For single file in DER, concatenate all cert DER bytes
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else if len(certs) > 0 {
|
||||
// Individual DER file (should only have one cert)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
case "both":
|
||||
// Add PEM version
|
||||
pemContent := encodeCertsToPEM(certs)
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".pem",
|
||||
content: pemContent,
|
||||
})
|
||||
// Add DER version
|
||||
if isSingle {
|
||||
var derContent []byte
|
||||
for _, cert := range certs {
|
||||
derContent = append(derContent, cert.Raw...)
|
||||
}
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: derContent,
|
||||
})
|
||||
} else if len(certs) > 0 {
|
||||
files = append(files, fileEntry{
|
||||
name: baseName + ".crt",
|
||||
content: certs[0].Raw,
|
||||
})
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// encodeCertsToPEM encodes certificates to PEM format.
|
||||
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
|
||||
var pemContent []byte
|
||||
for _, cert := range certs {
|
||||
pemBlock := &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
}
|
||||
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
|
||||
}
|
||||
return pemContent
|
||||
}
|
||||
|
||||
func generateManifest(files []fileEntry) []byte {
|
||||
var manifest strings.Builder
|
||||
for _, file := range files {
|
||||
if file.name == "MANIFEST" {
|
||||
continue
|
||||
}
|
||||
hash := sha256.Sum256(file.content)
|
||||
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
|
||||
}
|
||||
return []byte(manifest.String())
|
||||
}
|
||||
|
||||
// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
|
||||
func closeWithErr(baseErr error, closers ...io.Closer) error {
|
||||
for _, c := range closers {
|
||||
if c == nil {
|
||||
continue
|
||||
}
|
||||
if cerr := c.Close(); cerr != nil {
|
||||
baseErr = errors.Join(baseErr, cerr)
|
||||
}
|
||||
}
|
||||
return baseErr
|
||||
}
|
||||
|
||||
func createZipArchive(path string, files []fileEntry) error {
|
||||
f, zerr := os.Create(path)
|
||||
if zerr != nil {
|
||||
return zerr
|
||||
}
|
||||
|
||||
w := zip.NewWriter(f)
|
||||
|
||||
for _, file := range files {
|
||||
fw, werr := w.Create(file.name)
|
||||
if werr != nil {
|
||||
return closeWithErr(werr, w, f)
|
||||
}
|
||||
if _, werr = fw.Write(file.content); werr != nil {
|
||||
return closeWithErr(werr, w, f)
|
||||
}
|
||||
}
|
||||
|
||||
// Check errors on close operations
|
||||
if cerr := w.Close(); cerr != nil {
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func createTarGzArchive(path string, files []fileEntry) error {
|
||||
f, terr := os.Create(path)
|
||||
if terr != nil {
|
||||
return terr
|
||||
}
|
||||
|
||||
gw := gzip.NewWriter(f)
|
||||
tw := tar.NewWriter(gw)
|
||||
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.name,
|
||||
Mode: 0644,
|
||||
Size: int64(len(file.content)),
|
||||
}
|
||||
if herr := tw.WriteHeader(hdr); herr != nil {
|
||||
return closeWithErr(herr, tw, gw, f)
|
||||
}
|
||||
if _, werr := tw.Write(file.content); werr != nil {
|
||||
return closeWithErr(werr, tw, gw, f)
|
||||
}
|
||||
}
|
||||
|
||||
// Check errors on close operations in the correct order
|
||||
if cerr := tw.Close(); cerr != nil {
|
||||
_ = gw.Close()
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
if cerr := gw.Close(); cerr != nil {
|
||||
_ = f.Close()
|
||||
return cerr
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func generateHashFile(path string, files []string) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for _, file := range files {
|
||||
data, rerr := os.ReadFile(file)
|
||||
if rerr != nil {
|
||||
return rerr
|
||||
}
|
||||
|
||||
hash := sha256.Sum256(data)
|
||||
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,197 +0,0 @@
|
||||
This project is an exploration into the utility of JetBrains' Junie
|
||||
to write smaller but tedious programs.
|
||||
|
||||
Task: build a certificate bundling tool in cmd/cert-bundler. It
|
||||
creates archives of certificate chains.
|
||||
|
||||
A YAML file for this looks something like:
|
||||
|
||||
``` yaml
|
||||
config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
core_certs:
|
||||
certs:
|
||||
- root: roots/core-ca.pem
|
||||
intermediates:
|
||||
- int/cca1.pem
|
||||
- int/cca2.pem
|
||||
- int/cca3.pem
|
||||
- root: roots/ssh-ca.pem
|
||||
intermediates:
|
||||
- ssh/ssh_dmz1.pem
|
||||
- ssh/ssh_internal.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
```
|
||||
|
||||
Some requirements:
|
||||
|
||||
1. First, all the certificates should be loaded.
|
||||
2. For each root, each of the individual intermediates should be
|
||||
   checked to make sure they are properly signed by the root CA (a
   sketch of this check follows the list).
|
||||
3. The program should optionally take an expiration period (defaulting
|
||||
   to one year), specified in config.expiry, and if any certificate
|
||||
is within that expiration period, a warning should be printed.
|
||||
4. If outputs.include_single is true, all certificates under chains
|
||||
should be concatenated into a single file.
|
||||
5. If outputs.include_individual is true, all certificates under
|
||||
chains should be included at the root level (e.g. int/cca2.pem
|
||||
would be cca2.pem in the archive).
|
||||
6. If outputs.manifest is true, a "MANIFEST" file is created with
|
||||
SHA256 sums of each file included in the archive.
|
||||
7. For each of the formats, create an archive file in the output
|
||||
directory (specified with `-o`) with that format.
|
||||
- If zip is included, create a .zip file.
|
||||
- If tgz is included, create a .tar.gz file with default compression
|
||||
levels.
|
||||
- All archive files should include any generated files (single
|
||||
and/or individual) in the top-level directory.
|
||||
8. In the output directory, create a file with the same name as
|
||||
config.hashes that contains the SHA256 sum of all files created.
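
As a reference for requirement 2, here is a minimal sketch of the
signature check, assuming the root and intermediates have already been
parsed into *x509.Certificate values (the function name is
illustrative, not part of the tool):

``` go
package main

import (
	"crypto/x509"
	"fmt"
)

// verifyIntermediates checks that each intermediate was signed by the root.
func verifyIntermediates(root *x509.Certificate, intermediates []*x509.Certificate) error {
	for _, ic := range intermediates {
		if err := ic.CheckSignatureFrom(root); err != nil {
			return fmt.Errorf("intermediate %q is not signed by root %q: %w",
				ic.Subject.CommonName, root.Subject.CommonName, err)
		}
	}
	return nil
}
```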
|
||||
|
||||
-----
|
||||
|
||||
The outputs.include_single and outputs.include_individual describe
|
||||
what should go in the final archive. If both are specified, the output
|
||||
archive should include both a single bundle.pem and each individual
|
||||
certificate, for example.
|
||||
|
||||
-----
|
||||
|
||||
As it stands, given the following `bundle.yaml`:
|
||||
|
||||
``` yaml
|
||||
config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
core_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
- root: pems/isrg-root-x1.pem
|
||||
intermediates:
|
||||
- pems/le-e7.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: false
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
google_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: false
|
||||
manifest: true
|
||||
formats:
|
||||
- tgz
|
||||
lets_encrypt:
|
||||
certs:
|
||||
- root: pems/isrg-root-x1.pem
|
||||
intermediates:
|
||||
- pems/le-e7.pem
|
||||
outputs:
|
||||
include_single: false
|
||||
include_individual: true
|
||||
manifest: false
|
||||
formats:
|
||||
- zip
|
||||
```
|
||||
|
||||
The program outputs the following files:
|
||||
|
||||
- bundle.sha256
|
||||
- core_certs_0.tgz (contains individual certs)
|
||||
- core_certs_0.zip (contains individual certs)
|
||||
- core_certs_1.tgz (contains core_certs.pem)
|
||||
- core_certs_1.zip (contains core_certs.pem)
|
||||
- google_certs_0.tgz
|
||||
- lets_encrypt_0.zip
|
||||
|
||||
It should output
|
||||
|
||||
- bundle.sha256
|
||||
- core_certs.tgz
|
||||
- core_certs.zip
|
||||
- google_certs.tgz
|
||||
- lets_encrypt.zip
|
||||
|
||||
core_certs.* should contain `bundle.pem` and all the individual
|
||||
certs. There should be no _$n$ variants of archives.
|
||||
|
||||
-----
|
||||
|
||||
Add an additional field to outputs: encoding. It should accept one of
|
||||
`der`, `pem`, or `both`. If `der`, certificates should be output as a
|
||||
`.crt` file containing a DER-encoded certificate. If `pem`, certificates
|
||||
should be output as a `.pem` file containing a PEM-encoded certificate.
|
||||
If `both`, both the `.crt` and `.pem` forms should be included (a
sketch of this selection follows the examples below).
|
||||
|
||||
For example, given the previous config, if `encoding` is der, the
|
||||
google_certs.tgz archive should contain
|
||||
|
||||
- bundle.crt
|
||||
- MANIFEST
|
||||
|
||||
Or with lets_encrypt.zip:
|
||||
|
||||
- isrg-root-x1.crt
|
||||
- le-e7.crt
|
||||
|
||||
However, if `encoding` is pem, the lets_encrypt.zip archive should contain:
|
||||
|
||||
- isrg-root-x1.pem
|
||||
- le-e7.pem
|
||||
|
||||
And if `encoding` is both, the lets_encrypt.zip archive should contain:
|
||||
|
||||
- isrg-root-x1.crt
|
||||
- isrg-root-x1.pem
|
||||
- le-e7.crt
|
||||
- le-e7.pem
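
As a reference for the encoding rules above, here is a minimal sketch
of the per-certificate selection, assuming a parsed *x509.Certificate;
cert.Raw holds the DER bytes and pem.EncodeToMemory produces the PEM
form (the helper name is illustrative):

``` go
package main

import (
	"crypto/x509"
	"encoding/pem"
)

// encodeCert returns the file contents to emit for one certificate,
// keyed by extension, following the der/pem/both rules above.
func encodeCert(cert *x509.Certificate, encoding string) map[string][]byte {
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
	out := map[string][]byte{}
	switch encoding {
	case "pem":
		out[".pem"] = pemBytes
	case "der":
		out[".crt"] = cert.Raw
	case "both":
		out[".pem"] = pemBytes
		out[".crt"] = cert.Raw
	}
	return out
}
```

The single-bundle case simply concatenates these per-certificate
outputs into one file.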
|
||||
|
||||
-----
|
||||
|
||||
The tgz format should output a `.tar.gz` file instead of a `.tgz` file.
|
||||
|
||||
-----
|
||||
|
||||
Move the format extensions to a global variable.
|
||||
|
||||
-----
|
||||
|
||||
Write a README.txt with a description of the bundle.yaml format.
|
||||
|
||||
Additionally, update the help text for the program (e.g. with `-h`)
|
||||
to provide the same detailed information.
|
||||
|
||||
-----
|
||||
|
||||
It may be easier to embed the README.txt in the program on build.
|
||||
|
||||
-----
|
||||
|
||||
For the archive (tar.gz and zip) writers, make sure errors are
|
||||
checked at the end, and don't just defer the close operations.
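
A minimal sketch of that pattern for the zip writer, assuming the file
contents are already in memory; the point is that both Close calls are
checked explicitly rather than deferred:

``` go
package main

import (
	"archive/zip"
	"os"
)

// writeZip writes name/content pairs to path, checking close errors explicitly.
func writeZip(path string, files map[string][]byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	zw := zip.NewWriter(f)
	for name, content := range files {
		w, werr := zw.Create(name)
		if werr == nil {
			_, werr = w.Write(content)
		}
		if werr != nil {
			_ = zw.Close()
			_ = f.Close()
			return werr
		}
	}
	// Close the zip writer first so the central directory is flushed,
	// then close the underlying file; report the first failure.
	if cerr := zw.Close(); cerr != nil {
		_ = f.Close()
		return cerr
	}
	return f.Close()
}
```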
|
||||
|
||||
|
||||
13
cmd/cert-bundler/testdata/bundle.yaml
vendored
@@ -2,6 +2,19 @@ config:
|
||||
hashes: bundle.sha256
|
||||
expiry: 1y
|
||||
chains:
|
||||
weird:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
intermediates:
|
||||
- pems/goog-wr2.pem
|
||||
- root: pems/isrg-root-x1.pem
|
||||
outputs:
|
||||
include_single: true
|
||||
include_individual: true
|
||||
manifest: true
|
||||
formats:
|
||||
- zip
|
||||
- tgz
|
||||
core_certs:
|
||||
certs:
|
||||
- root: pems/gts-r1.pem
|
||||
|
||||
4
cmd/cert-bundler/testdata/pkg/bundle.sha256
vendored
@@ -1,4 +0,0 @@
|
||||
5ed8bf9ed693045faa8a5cb0edc4a870052e56aef6291ce8b1604565affbc2a4 core_certs.zip
|
||||
e59eddc590d2f7b790a87c5b56e81697088ab54be382c0e2c51b82034006d308 core_certs.tgz
|
||||
51b9b63b1335118079e90700a3a5b847c363808e9116e576ca84f301bc433289 google_certs.tgz
|
||||
3d1910ca8835c3ded1755a8c7d6c48083c2f3ff68b2bfbf932aaf27e29d0a232 lets_encrypt.zip
|
||||
BIN
cmd/cert-bundler/testdata/pkg/core_certs.tgz
vendored
Binary file not shown.
BIN
cmd/cert-bundler/testdata/pkg/core_certs.zip
vendored
Binary file not shown.
BIN
cmd/cert-bundler/testdata/pkg/google_certs.tgz
vendored
Binary file not shown.
BIN
cmd/cert-bundler/testdata/pkg/lets_encrypt.zip
vendored
Binary file not shown.
@@ -2,27 +2,151 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/kr/text"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
// following two lifted from CFSSL, (replace-regexp "\(.+\): \(.+\),"
|
||||
// "\2: \1,")
|
||||
|
||||
const (
|
||||
sSHA256 = "SHA256"
|
||||
sSHA512 = "SHA512"
|
||||
)
|
||||
|
||||
var keyUsage = map[x509.KeyUsage]string{
|
||||
x509.KeyUsageDigitalSignature: "digital signature",
|
||||
x509.KeyUsageContentCommitment: "content committment",
|
||||
x509.KeyUsageKeyEncipherment: "key encipherment",
|
||||
x509.KeyUsageKeyAgreement: "key agreement",
|
||||
x509.KeyUsageDataEncipherment: "data encipherment",
|
||||
x509.KeyUsageCertSign: "cert sign",
|
||||
x509.KeyUsageCRLSign: "crl sign",
|
||||
x509.KeyUsageEncipherOnly: "encipher only",
|
||||
x509.KeyUsageDecipherOnly: "decipher only",
|
||||
}
|
||||
|
||||
var extKeyUsages = map[x509.ExtKeyUsage]string{
|
||||
x509.ExtKeyUsageAny: "any",
|
||||
x509.ExtKeyUsageServerAuth: "server auth",
|
||||
x509.ExtKeyUsageClientAuth: "client auth",
|
||||
x509.ExtKeyUsageCodeSigning: "code signing",
|
||||
x509.ExtKeyUsageEmailProtection: "s/mime",
|
||||
x509.ExtKeyUsageIPSECEndSystem: "ipsec end system",
|
||||
x509.ExtKeyUsageIPSECTunnel: "ipsec tunnel",
|
||||
x509.ExtKeyUsageIPSECUser: "ipsec user",
|
||||
x509.ExtKeyUsageTimeStamping: "timestamping",
|
||||
x509.ExtKeyUsageOCSPSigning: "ocsp signing",
|
||||
x509.ExtKeyUsageMicrosoftServerGatedCrypto: "microsoft sgc",
|
||||
x509.ExtKeyUsageNetscapeServerGatedCrypto: "netscape sgc",
|
||||
x509.ExtKeyUsageMicrosoftCommercialCodeSigning: "microsoft commercial code signing",
|
||||
x509.ExtKeyUsageMicrosoftKernelCodeSigning: "microsoft kernel code signing",
|
||||
}
|
||||
|
||||
func sigAlgoPK(a x509.SignatureAlgorithm) string {
|
||||
switch a {
|
||||
case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
|
||||
return "RSA"
|
||||
case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS:
|
||||
return "RSA-PSS"
|
||||
case x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
|
||||
return "ECDSA"
|
||||
case x509.DSAWithSHA1, x509.DSAWithSHA256:
|
||||
return "DSA"
|
||||
case x509.PureEd25519:
|
||||
return "Ed25519"
|
||||
case x509.UnknownSignatureAlgorithm:
|
||||
return "unknown public key algorithm"
|
||||
default:
|
||||
return "unknown public key algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
func sigAlgoHash(a x509.SignatureAlgorithm) string {
|
||||
switch a {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5"
|
||||
case x509.SHA1WithRSA, x509.ECDSAWithSHA1, x509.DSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.SHA256WithRSA, x509.ECDSAWithSHA256, x509.DSAWithSHA256:
|
||||
return sSHA256
|
||||
case x509.SHA256WithRSAPSS:
|
||||
return sSHA256
|
||||
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
|
||||
return "SHA384"
|
||||
case x509.SHA384WithRSAPSS:
|
||||
return "SHA384"
|
||||
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
|
||||
return sSHA512
|
||||
case x509.SHA512WithRSAPSS:
|
||||
return sSHA512
|
||||
case x509.PureEd25519:
|
||||
return sSHA512
|
||||
case x509.UnknownSignatureAlgorithm:
|
||||
return "unknown hash algorithm"
|
||||
default:
|
||||
return "unknown hash algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
const maxLine = 78
|
||||
|
||||
func makeIndent(n int) string {
|
||||
s := " "
|
||||
var sSb97 strings.Builder
|
||||
for range n {
|
||||
sSb97.WriteString(" ")
|
||||
}
|
||||
s += sSb97.String()
|
||||
return s
|
||||
}
|
||||
|
||||
func indentLen(n int) int {
|
||||
return 4 + (8 * n)
|
||||
}
|
||||
|
||||
// this isn't real efficient, but that's not a problem here.
|
||||
func wrap(s string, indent int) string {
|
||||
if indent > 3 {
|
||||
indent = 3
|
||||
}
|
||||
|
||||
wrapped := text.Wrap(s, maxLine)
|
||||
lines := strings.SplitN(wrapped, "\n", 2)
|
||||
if len(lines) == 1 {
|
||||
return lines[0]
|
||||
}
|
||||
|
||||
if (maxLine - indentLen(indent)) <= 0 {
|
||||
panic("too much indentation")
|
||||
}
|
||||
|
||||
rest := strings.Join(lines[1:], " ")
|
||||
wrapped = text.Wrap(rest, maxLine-indentLen(indent))
|
||||
return lines[0] + "\n" + text.Indent(wrapped, makeIndent(indent))
|
||||
}
|
||||
|
||||
func dumpHex(in []byte) string {
|
||||
return lib.HexEncode(in, lib.HexEncodeUpperColon)
|
||||
}
|
||||
|
||||
func certPublic(cert *x509.Certificate) string {
|
||||
switch pub := cert.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
@@ -116,7 +240,7 @@ func showBasicConstraints(cert *x509.Certificate) {
|
||||
fmt.Fprint(os.Stdout, " (basic constraint failure)")
|
||||
}
|
||||
} else {
|
||||
fmt.Fprint(os.Stdout, "is not a CA certificate")
|
||||
fmt.Fprint(os.Stdout, ", is not a CA certificate")
|
||||
if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
|
||||
fmt.Fprint(os.Stdout, " (key encipherment usage enabled!)")
|
||||
}
|
||||
@@ -220,122 +344,6 @@ func displayCert(cert *x509.Certificate) {
|
||||
}
|
||||
}
|
||||
|
||||
func displayAllCerts(in []byte, leafOnly bool) {
|
||||
certs, err := certlib.ParseCertificatesPEM(in)
|
||||
if err != nil {
|
||||
certs, _, err = certlib.ParseCertificatesDER(in, "")
|
||||
if err != nil {
|
||||
_, _ = lib.Warn(err, "failed to parse certificates")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(certs) == 0 {
|
||||
_, _ = lib.Warnx("no certificates found")
|
||||
return
|
||||
}
|
||||
|
||||
if leafOnly {
|
||||
displayCert(certs[0])
|
||||
return
|
||||
}
|
||||
|
||||
for i := range certs {
|
||||
displayCert(certs[i])
|
||||
}
|
||||
}
|
||||
|
||||
func displayAllCertsWeb(uri string, leafOnly bool) {
|
||||
ci := getConnInfo(uri)
|
||||
d := &tls.Dialer{Config: permissiveConfig()}
|
||||
nc, err := d.DialContext(context.Background(), "tcp", ci.Addr)
|
||||
if err != nil {
|
||||
_, _ = lib.Warn(err, "couldn't connect to %s", ci.Addr)
|
||||
return
|
||||
}
|
||||
|
||||
conn, ok := nc.(*tls.Conn)
|
||||
if !ok {
|
||||
_, _ = lib.Warnx("invalid TLS connection (not a *tls.Conn)")
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
state := conn.ConnectionState()
|
||||
if err = conn.Close(); err != nil {
|
||||
_, _ = lib.Warn(err, "couldn't close TLS connection")
|
||||
}
|
||||
|
||||
d = &tls.Dialer{Config: verifyConfig(ci.Host)}
|
||||
nc, err = d.DialContext(context.Background(), "tcp", ci.Addr)
|
||||
if err == nil {
|
||||
conn, ok = nc.(*tls.Conn)
|
||||
if !ok {
|
||||
_, _ = lib.Warnx("invalid TLS connection (not a *tls.Conn)")
|
||||
return
|
||||
}
|
||||
|
||||
err = conn.VerifyHostname(ci.Host)
|
||||
if err == nil {
|
||||
state = conn.ConnectionState()
|
||||
}
|
||||
conn.Close()
|
||||
} else {
|
||||
_, _ = lib.Warn(err, "TLS verification error with server name %s", ci.Host)
|
||||
}
|
||||
|
||||
if len(state.PeerCertificates) == 0 {
|
||||
_, _ = lib.Warnx("no certificates found")
|
||||
return
|
||||
}
|
||||
|
||||
if leafOnly {
|
||||
displayCert(state.PeerCertificates[0])
|
||||
return
|
||||
}
|
||||
|
||||
if len(state.VerifiedChains) == 0 {
|
||||
_, _ = lib.Warnx("no verified chains found; using peer chain")
|
||||
for i := range state.PeerCertificates {
|
||||
displayCert(state.PeerCertificates[i])
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintln(os.Stdout, "TLS chain verified successfully.")
|
||||
for i := range state.VerifiedChains {
|
||||
fmt.Fprintf(os.Stdout, "--- Verified certificate chain %d ---%s", i+1, "\n")
|
||||
for j := range state.VerifiedChains[i] {
|
||||
displayCert(state.VerifiedChains[i][j])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func shouldReadStdin(argc int, argv []string) bool {
|
||||
if argc == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if argc == 1 && argv[0] == "-" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func readStdin(leafOnly bool) {
|
||||
certs, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
_, _ = lib.Warn(err, "couldn't read certificates from standard input")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// This is needed for getting certs from JSON/jq.
|
||||
certs = bytes.TrimSpace(certs)
|
||||
certs = bytes.ReplaceAll(certs, []byte(`\n`), []byte{0xa})
|
||||
certs = bytes.Trim(certs, `"`)
|
||||
displayAllCerts(certs, leafOnly)
|
||||
}
|
||||
|
||||
func main() {
|
||||
var leafOnly bool
|
||||
flag.BoolVar(&showHash, "d", false, "show hashes of raw DER contents")
|
||||
@@ -343,23 +351,26 @@ func main() {
|
||||
flag.BoolVar(&leafOnly, "l", false, "only show the leaf certificate")
|
||||
flag.Parse()
|
||||
|
||||
if shouldReadStdin(flag.NArg(), flag.Args()) {
|
||||
readStdin(leafOnly)
|
||||
return
|
||||
opts := &certlib.FetcherOpts{
|
||||
SkipVerify: true,
|
||||
Roots: nil,
|
||||
}
|
||||
|
||||
for _, filename := range flag.Args() {
|
||||
fmt.Fprintf(os.Stdout, "--%s ---%s", filename, "\n")
|
||||
if strings.HasPrefix(filename, "https://") {
|
||||
displayAllCertsWeb(filename, leafOnly)
|
||||
} else {
|
||||
in, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
_, _ = lib.Warn(err, "couldn't read certificate")
|
||||
continue
|
||||
}
|
||||
certs, err := certlib.GetCertificateChain(filename, opts)
|
||||
if err != nil {
|
||||
_, _ = lib.Warn(err, "couldn't read certificate")
|
||||
continue
|
||||
}
|
||||
|
||||
displayAllCerts(in, leafOnly)
|
||||
if leafOnly {
|
||||
displayCert(certs[0])
|
||||
continue
|
||||
}
|
||||
|
||||
for i := range certs {
|
||||
displayCert(certs[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,189 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/kr/text"
|
||||
)
|
||||
|
||||
// following two lifted from CFSSL, (replace-regexp "\(.+\): \(.+\),"
|
||||
// "\2: \1,")
|
||||
|
||||
const (
|
||||
sSHA256 = "SHA256"
|
||||
sSHA512 = "SHA512"
|
||||
)
|
||||
|
||||
var keyUsage = map[x509.KeyUsage]string{
|
||||
x509.KeyUsageDigitalSignature: "digital signature",
|
||||
x509.KeyUsageContentCommitment: "content committment",
|
||||
x509.KeyUsageKeyEncipherment: "key encipherment",
|
||||
x509.KeyUsageKeyAgreement: "key agreement",
|
||||
x509.KeyUsageDataEncipherment: "data encipherment",
|
||||
x509.KeyUsageCertSign: "cert sign",
|
||||
x509.KeyUsageCRLSign: "crl sign",
|
||||
x509.KeyUsageEncipherOnly: "encipher only",
|
||||
x509.KeyUsageDecipherOnly: "decipher only",
|
||||
}
|
||||
|
||||
var extKeyUsages = map[x509.ExtKeyUsage]string{
|
||||
x509.ExtKeyUsageAny: "any",
|
||||
x509.ExtKeyUsageServerAuth: "server auth",
|
||||
x509.ExtKeyUsageClientAuth: "client auth",
|
||||
x509.ExtKeyUsageCodeSigning: "code signing",
|
||||
x509.ExtKeyUsageEmailProtection: "s/mime",
|
||||
x509.ExtKeyUsageIPSECEndSystem: "ipsec end system",
|
||||
x509.ExtKeyUsageIPSECTunnel: "ipsec tunnel",
|
||||
x509.ExtKeyUsageIPSECUser: "ipsec user",
|
||||
x509.ExtKeyUsageTimeStamping: "timestamping",
|
||||
x509.ExtKeyUsageOCSPSigning: "ocsp signing",
|
||||
x509.ExtKeyUsageMicrosoftServerGatedCrypto: "microsoft sgc",
|
||||
x509.ExtKeyUsageNetscapeServerGatedCrypto: "netscape sgc",
|
||||
x509.ExtKeyUsageMicrosoftCommercialCodeSigning: "microsoft commercial code signing",
|
||||
x509.ExtKeyUsageMicrosoftKernelCodeSigning: "microsoft kernel code signing",
|
||||
}
|
||||
|
||||
func sigAlgoPK(a x509.SignatureAlgorithm) string {
|
||||
switch a {
|
||||
case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
|
||||
return "RSA"
|
||||
case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS:
|
||||
return "RSA-PSS"
|
||||
case x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
|
||||
return "ECDSA"
|
||||
case x509.DSAWithSHA1, x509.DSAWithSHA256:
|
||||
return "DSA"
|
||||
case x509.PureEd25519:
|
||||
return "Ed25519"
|
||||
case x509.UnknownSignatureAlgorithm:
|
||||
return "unknown public key algorithm"
|
||||
default:
|
||||
return "unknown public key algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
func sigAlgoHash(a x509.SignatureAlgorithm) string {
|
||||
switch a {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5"
|
||||
case x509.SHA1WithRSA, x509.ECDSAWithSHA1, x509.DSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.SHA256WithRSA, x509.ECDSAWithSHA256, x509.DSAWithSHA256:
|
||||
return sSHA256
|
||||
case x509.SHA256WithRSAPSS:
|
||||
return sSHA256
|
||||
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
|
||||
return "SHA384"
|
||||
case x509.SHA384WithRSAPSS:
|
||||
return "SHA384"
|
||||
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
|
||||
return sSHA512
|
||||
case x509.SHA512WithRSAPSS:
|
||||
return sSHA512
|
||||
case x509.PureEd25519:
|
||||
return sSHA512
|
||||
case x509.UnknownSignatureAlgorithm:
|
||||
return "unknown hash algorithm"
|
||||
default:
|
||||
return "unknown hash algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
const maxLine = 78
|
||||
|
||||
func makeIndent(n int) string {
|
||||
s := " "
|
||||
var sSb97 strings.Builder
|
||||
for range n {
|
||||
sSb97.WriteString(" ")
|
||||
}
|
||||
s += sSb97.String()
|
||||
return s
|
||||
}
|
||||
|
||||
func indentLen(n int) int {
|
||||
return 4 + (8 * n)
|
||||
}
|
||||
|
||||
// this isn't real efficient, but that's not a problem here.
|
||||
func wrap(s string, indent int) string {
|
||||
if indent > 3 {
|
||||
indent = 3
|
||||
}
|
||||
|
||||
wrapped := text.Wrap(s, maxLine)
|
||||
lines := strings.SplitN(wrapped, "\n", 2)
|
||||
if len(lines) == 1 {
|
||||
return lines[0]
|
||||
}
|
||||
|
||||
if (maxLine - indentLen(indent)) <= 0 {
|
||||
panic("too much indentation")
|
||||
}
|
||||
|
||||
rest := strings.Join(lines[1:], " ")
|
||||
wrapped = text.Wrap(rest, maxLine-indentLen(indent))
|
||||
return lines[0] + "\n" + text.Indent(wrapped, makeIndent(indent))
|
||||
}
|
||||
|
||||
func dumpHex(in []byte) string {
|
||||
var s string
|
||||
var sSb130 strings.Builder
|
||||
for i := range in {
|
||||
sSb130.WriteString(fmt.Sprintf("%02X:", in[i]))
|
||||
}
|
||||
s += sSb130.String()
|
||||
|
||||
return strings.Trim(s, ":")
|
||||
}
|
||||
|
||||
// permissiveConfig returns a maximally-accepting TLS configuration;
|
||||
// the purpose is to look at the cert, not verify the security properties
|
||||
// of the connection.
|
||||
func permissiveConfig() *tls.Config {
|
||||
return &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
} // #nosec G402
|
||||
}
|
||||
|
||||
// verifyConfig returns a config that will verify the connection.
|
||||
func verifyConfig(hostname string) *tls.Config {
|
||||
return &tls.Config{
|
||||
ServerName: hostname,
|
||||
} // #nosec G402
|
||||
}
|
||||
|
||||
type connInfo struct {
|
||||
// The original URI provided.
|
||||
URI string
|
||||
|
||||
// The hostname of the server.
|
||||
Host string
|
||||
|
||||
// The port to connect on.
|
||||
Port string
|
||||
|
||||
// The address to connect to.
|
||||
Addr string
|
||||
}
|
||||
|
||||
func getConnInfo(uri string) *connInfo {
|
||||
ci := &connInfo{URI: uri}
|
||||
ci.Host = uri[len("https://"):]
|
||||
|
||||
host, port, err := net.SplitHostPort(ci.Host)
|
||||
if err != nil {
|
||||
ci.Port = "443"
|
||||
} else {
|
||||
ci.Host = host
|
||||
ci.Port = port
|
||||
}
|
||||
ci.Addr = net.JoinHostPort(ci.Host, ci.Port)
|
||||
return ci
|
||||
}
|
||||
@@ -75,18 +75,15 @@ func checkCert(cert *x509.Certificate) {
|
||||
}
|
||||
|
||||
func main() {
|
||||
opts := &certlib.FetcherOpts{}
|
||||
|
||||
flag.BoolVar(&opts.SkipVerify, "k", false, "skip server verification")
|
||||
flag.BoolVar(&warnOnly, "q", false, "only warn about expiring certs")
|
||||
flag.DurationVar(&leeway, "t", leeway, "warn if certificates are closer than this to expiring")
|
||||
flag.Parse()
|
||||
|
||||
for _, file := range flag.Args() {
|
||||
in, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
_, _ = lib.Warn(err, "failed to read file")
|
||||
continue
|
||||
}
|
||||
|
||||
certs, err := certlib.ParseCertificatesPEM(in)
|
||||
certs, err := certlib.GetCertificateChain(file, opts)
|
||||
if err != nil {
|
||||
_, _ = lib.Warn(err, "while parsing certificates")
|
||||
continue
|
||||
|
||||
53
cmd/certser/main.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
const displayInt lib.HexEncodeMode = iota
|
||||
|
||||
func parseDisplayMode(mode string) lib.HexEncodeMode {
|
||||
mode = strings.ToLower(mode)
|
||||
|
||||
if mode == "int" {
|
||||
return displayInt
|
||||
}
|
||||
|
||||
return lib.ParseHexEncodeMode(mode)
|
||||
}
|
||||
|
||||
func serialString(cert *x509.Certificate, mode lib.HexEncodeMode) string {
|
||||
if mode == displayInt {
|
||||
return cert.SerialNumber.String()
|
||||
}
|
||||
|
||||
return lib.HexEncode(cert.SerialNumber.Bytes(), mode)
|
||||
}
|
||||
|
||||
func main() {
|
||||
opts := &certlib.FetcherOpts{}
|
||||
displayAs := flag.String("d", "int", "display mode (int, hex, uhex)")
|
||||
showExpiry := flag.Bool("e", false, "show expiry date")
|
||||
flag.BoolVar(&opts.SkipVerify, "k", false, "skip server verification")
|
||||
flag.Parse()
|
||||
|
||||
displayMode := parseDisplayMode(*displayAs)
|
||||
|
||||
for _, arg := range flag.Args() {
|
||||
cert, err := certlib.GetCertificate(arg, opts)
|
||||
die.If(err)
|
||||
|
||||
fmt.Printf("%s: %s", arg, serialString(cert, displayMode))
|
||||
if *showExpiry {
|
||||
fmt.Printf(" (%s)", cert.NotAfter.Format("2006-01-02"))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
@@ -29,9 +29,9 @@ func printRevocation(cert *x509.Certificate) {
|
||||
}
|
||||
|
||||
type appConfig struct {
|
||||
caFile, intFile string
|
||||
forceIntermediateBundle bool
|
||||
revexp, verbose bool
|
||||
caFile, intFile string
|
||||
forceIntermediateBundle bool
|
||||
revexp, skipVerify, verbose bool
|
||||
}
|
||||
|
||||
func parseFlags() appConfig {
|
||||
@@ -40,6 +40,7 @@ func parseFlags() appConfig {
|
||||
flag.StringVar(&cfg.intFile, "i", "", "intermediate `bundle`")
|
||||
flag.BoolVar(&cfg.forceIntermediateBundle, "f", false,
|
||||
"force the use of the intermediate bundle, ignoring any intermediates bundled with certificate")
|
||||
flag.BoolVar(&cfg.skipVerify, "k", false, "skip CA verification")
|
||||
flag.BoolVar(&cfg.revexp, "r", false, "print revocation and expiry information")
|
||||
flag.BoolVar(&cfg.verbose, "v", false, "verbose")
|
||||
flag.Parse()
|
||||
@@ -102,12 +103,17 @@ func run(cfg appConfig) error {
|
||||
fmt.Fprintf(os.Stderr, "Usage: %s [-ca bundle] [-i bundle] cert", lib.ProgName())
|
||||
}
|
||||
|
||||
fileData, err := os.ReadFile(flag.Arg(0))
|
||||
combinedPool, err := certlib.LoadFullCertPool(cfg.caFile, cfg.intFile)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to build combined pool: %w", err)
|
||||
}
|
||||
|
||||
chain, err := certlib.ParseCertificatesPEM(fileData)
|
||||
opts := &certlib.FetcherOpts{
|
||||
Roots: combinedPool,
|
||||
SkipVerify: cfg.skipVerify,
|
||||
}
|
||||
|
||||
chain, err := certlib.GetCertificateChain(flag.Arg(0), opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
@@ -19,14 +20,7 @@ func main() {
|
||||
in, err := os.ReadFile(fileName)
|
||||
die.If(err)
|
||||
|
||||
if p, _ := pem.Decode(in); p != nil {
|
||||
if p.Type != "CERTIFICATE REQUEST" {
|
||||
die.With("INVALID FILE TYPE")
|
||||
}
|
||||
in = p.Bytes
|
||||
}
|
||||
|
||||
csr, err := x509.ParseCertificateRequest(in)
|
||||
csr, _, err := certlib.ParseCSR(in)
|
||||
die.If(err)
|
||||
|
||||
out, err := x509.MarshalPKIXPublicKey(csr.PublicKey)
|
||||
|
||||
@@ -2,7 +2,6 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1" // #nosec G505
|
||||
@@ -14,8 +13,8 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
@@ -32,10 +31,10 @@ Usage:
|
||||
ski [-hm] files...
|
||||
|
||||
Flags:
|
||||
-d Hex encoding mode.
|
||||
-h Print this help message.
|
||||
-m All SKIs should match; as soon as an SKI mismatch is found,
|
||||
it is reported.
|
||||
|
||||
`)
|
||||
}
|
||||
|
||||
@@ -83,28 +82,19 @@ func parse(path string) ([]byte, string, string) {
|
||||
}
|
||||
|
||||
func parseKey(data []byte) ([]byte, string) {
|
||||
privInterface, err := x509.ParsePKCS8PrivateKey(data)
|
||||
priv, err := certlib.ParsePrivateKeyDER(data)
|
||||
if err != nil {
|
||||
privInterface, err = x509.ParsePKCS1PrivateKey(data)
|
||||
if err != nil {
|
||||
privInterface, err = x509.ParseECPrivateKey(data)
|
||||
if err != nil {
|
||||
die.With("couldn't parse private key.")
|
||||
}
|
||||
}
|
||||
die.If(err)
|
||||
}
|
||||
|
||||
var priv crypto.Signer
|
||||
var kt string
|
||||
switch p := privInterface.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
priv = p
|
||||
switch priv.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
kt = keyTypeRSA
|
||||
case *ecdsa.PrivateKey:
|
||||
priv = p
|
||||
case *ecdsa.PublicKey:
|
||||
kt = keyTypeECDSA
|
||||
default:
|
||||
die.With("unknown private key type %T", privInterface)
|
||||
die.With("unknown private key type %T", priv)
|
||||
}
|
||||
|
||||
public, err := x509.MarshalPKIXPublicKey(priv.Public())
|
||||
@@ -134,7 +124,8 @@ func parseCertificate(data []byte) ([]byte, string) {
|
||||
}
|
||||
|
||||
func parseCSR(data []byte) ([]byte, string) {
|
||||
csr, err := x509.ParseCertificateRequest(data)
|
||||
// Use certlib to support both PEM and DER and to centralize validation.
|
||||
csr, _, err := certlib.ParseCSR(data)
|
||||
die.If(err)
|
||||
|
||||
pub := csr.PublicKey
|
||||
@@ -153,15 +144,8 @@ func parseCSR(data []byte) ([]byte, string) {
|
||||
return public, kt
|
||||
}
|
||||
|
||||
func dumpHex(in []byte) string {
|
||||
var s string
|
||||
var sSb153 strings.Builder
|
||||
for i := range in {
|
||||
sSb153.WriteString(fmt.Sprintf("%02X:", in[i]))
|
||||
}
|
||||
s += sSb153.String()
|
||||
|
||||
return strings.Trim(s, ":")
|
||||
func dumpHex(in []byte, mode lib.HexEncodeMode) string {
|
||||
return lib.HexEncode(in, mode)
|
||||
}
|
||||
|
||||
type subjectPublicKeyInfo struct {
|
||||
@@ -171,10 +155,14 @@ type subjectPublicKeyInfo struct {
|
||||
|
||||
func main() {
|
||||
var help, shouldMatch bool
|
||||
var displayModeString string
|
||||
flag.StringVar(&displayModeString, "d", "lower", "hex encoding mode")
|
||||
flag.BoolVar(&help, "h", false, "print a help message and exit")
|
||||
flag.BoolVar(&shouldMatch, "m", false, "all SKIs should match")
|
||||
flag.Parse()
|
||||
|
||||
displayMode := lib.ParseHexEncodeMode(displayModeString)
|
||||
|
||||
if help {
|
||||
usage(os.Stdout)
|
||||
os.Exit(0)
|
||||
@@ -192,7 +180,7 @@ func main() {
|
||||
}
|
||||
|
||||
pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes) // #nosec G401 this is the standard
|
||||
pubHashString := dumpHex(pubHash[:])
|
||||
pubHashString := dumpHex(pubHash[:], displayMode)
|
||||
if ski == "" {
|
||||
ski = pubHashString
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/certlib"
|
||||
"git.wntrmute.dev/kyle/goutils/die"
|
||||
)
|
||||
|
||||
@@ -124,34 +125,14 @@ func loadKey(path string) (crypto.Signer, error) {
|
||||
}
|
||||
|
||||
in = bytes.TrimSpace(in)
|
||||
p, _ := pem.Decode(in)
|
||||
if p != nil {
|
||||
if p, _ := pem.Decode(in); p != nil {
|
||||
if !validPEMs[p.Type] {
|
||||
return nil, errors.New("invalid private key file type " + p.Type)
|
||||
}
|
||||
in = p.Bytes
|
||||
return certlib.ParsePrivateKeyPEM(in)
|
||||
}
|
||||
|
||||
priv, err := x509.ParsePKCS8PrivateKey(in)
|
||||
if err != nil {
|
||||
priv, err = x509.ParsePKCS1PrivateKey(in)
|
||||
if err != nil {
|
||||
priv, err = x509.ParseECPrivateKey(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch p := priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return p, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return p, nil
|
||||
default:
|
||||
// should never reach here
|
||||
return nil, errors.New("invalid private key")
|
||||
}
|
||||
return certlib.ParsePrivateKeyDER(in)
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
1
go.mod
@@ -15,6 +15,7 @@ require (
|
||||
github.com/benbjohnson/clock v1.3.5
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/google/certificate-transparency-go v1.0.21
|
||||
rsc.io/qr v0.2.0
|
||||
)
|
||||
|
||||
require (
|
||||
|
||||
2
go.sum
@@ -44,3 +44,5 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
|
||||
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
|
||||
|
||||
129
lib/lib.go
@@ -2,9 +2,11 @@
|
||||
package lib
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -109,3 +111,130 @@ func Duration(d time.Duration) string {
|
||||
s += fmt.Sprintf("%dh%s", hours, d)
|
||||
return s
|
||||
}
|
||||
|
||||
type HexEncodeMode uint8
|
||||
|
||||
const (
|
||||
// HexEncodeLower prints the bytes as lowercase hexadecimal.
|
||||
HexEncodeLower HexEncodeMode = iota + 1
|
||||
// HexEncodeUpper prints the bytes as uppercase hexadecimal.
|
||||
HexEncodeUpper
|
||||
// HexEncodeLowerColon prints the bytes as lowercase hexadecimal
|
||||
// with colons between each pair of bytes.
|
||||
HexEncodeLowerColon
|
||||
// HexEncodeUpperColon prints the bytes as uppercase hexadecimal
|
||||
// with colons between each pair of bytes.
|
||||
HexEncodeUpperColon
|
||||
// HexEncodeBytes prints the string as a sequence of []byte.
|
||||
HexEncodeBytes
|
||||
)
|
||||
|
||||
func (m HexEncodeMode) String() string {
|
||||
switch m {
|
||||
case HexEncodeLower:
|
||||
return "lower"
|
||||
case HexEncodeUpper:
|
||||
return "upper"
|
||||
case HexEncodeLowerColon:
|
||||
return "lcolon"
|
||||
case HexEncodeUpperColon:
|
||||
return "ucolon"
|
||||
case HexEncodeBytes:
|
||||
return "bytes"
|
||||
default:
|
||||
panic("invalid hex encode mode")
|
||||
}
|
||||
}
|
||||
|
||||
func ParseHexEncodeMode(s string) HexEncodeMode {
|
||||
switch strings.ToLower(s) {
|
||||
case "lower":
|
||||
return HexEncodeLower
|
||||
case "upper":
|
||||
return HexEncodeUpper
|
||||
case "lcolon":
|
||||
return HexEncodeLowerColon
|
||||
case "ucolon":
|
||||
return HexEncodeUpperColon
|
||||
case "bytes":
|
||||
return HexEncodeBytes
|
||||
}
|
||||
|
||||
panic("invalid hex encode mode")
|
||||
}
|
||||
|
||||
func hexColons(s string) string {
|
||||
if len(s)%2 != 0 {
|
||||
fmt.Fprintf(os.Stderr, "hex string: %s\n", s)
|
||||
fmt.Fprintf(os.Stderr, "hex length: %d\n", len(s))
|
||||
panic("invalid hex string length")
|
||||
}
|
||||
|
||||
n := len(s)
|
||||
if n <= 2 {
|
||||
return s
|
||||
}
|
||||
|
||||
pairCount := n / 2
|
||||
if n%2 != 0 {
|
||||
pairCount++
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(n + pairCount - 1)
|
||||
|
||||
for i := 0; i < n; i += 2 {
|
||||
b.WriteByte(s[i])
|
||||
|
||||
if i+1 < n {
|
||||
b.WriteByte(s[i+1])
|
||||
}
|
||||
|
||||
if i+2 < n {
|
||||
b.WriteByte(':')
|
||||
}
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func hexEncode(b []byte) string {
|
||||
s := hex.EncodeToString(b)
|
||||
|
||||
if len(s)%2 != 0 {
|
||||
s = "0" + s
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func bytesAsByteSliceString(buf []byte) string {
|
||||
sb := &strings.Builder{}
|
||||
sb.WriteString("[]byte{")
|
||||
for i := range buf {
|
||||
fmt.Fprintf(sb, "0x%02x, ", buf[i])
|
||||
}
|
||||
sb.WriteString("}")
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// HexEncode encodes the given bytes as a hexadecimal string.
|
||||
func HexEncode(b []byte, mode HexEncodeMode) string {
|
||||
str := hexEncode(b)
|
||||
|
||||
switch mode {
|
||||
case HexEncodeLower:
|
||||
return str
|
||||
case HexEncodeUpper:
|
||||
return strings.ToUpper(str)
|
||||
case HexEncodeLowerColon:
|
||||
return hexColons(str)
|
||||
case HexEncodeUpperColon:
|
||||
return strings.ToUpper(hexColons(str))
|
||||
case HexEncodeBytes:
|
||||
return bytesAsByteSliceString(b)
|
||||
default:
|
||||
panic("invalid hex encode mode")
|
||||
}
|
||||
}
|
||||
|
||||
79
lib/lib_test.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package lib_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.wntrmute.dev/kyle/goutils/lib"
|
||||
)
|
||||
|
||||
func TestHexEncode_LowerUpper(t *testing.T) {
|
||||
b := []byte{0x0f, 0xa1, 0x00, 0xff}
|
||||
|
||||
gotLower := lib.HexEncode(b, lib.HexEncodeLower)
|
||||
if gotLower != "0fa100ff" {
|
||||
t.Fatalf("lib.HexEncode lower: expected %q, got %q", "0fa100ff", gotLower)
|
||||
}
|
||||
|
||||
gotUpper := lib.HexEncode(b, lib.HexEncodeUpper)
|
||||
if gotUpper != "0FA100FF" {
|
||||
t.Fatalf("lib.HexEncode upper: expected %q, got %q", "0FA100FF", gotUpper)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexEncode_ColonModes(t *testing.T) {
|
||||
// Includes leading zero nibble and a zero byte to verify padding and separators
|
||||
b := []byte{0x0f, 0xa1, 0x00, 0xff}
|
||||
|
||||
gotLColon := lib.HexEncode(b, lib.HexEncodeLowerColon)
|
||||
if gotLColon != "0f:a1:00:ff" {
|
||||
t.Fatalf("lib.HexEncode colon lower: expected %q, got %q", "0f:a1:00:ff", gotLColon)
|
||||
}
|
||||
|
||||
gotUColon := lib.HexEncode(b, lib.HexEncodeUpperColon)
|
||||
if gotUColon != "0F:A1:00:FF" {
|
||||
t.Fatalf("lib.HexEncode colon upper: expected %q, got %q", "0F:A1:00:FF", gotUColon)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexEncode_EmptyInput(t *testing.T) {
|
||||
var b []byte
|
||||
if got := lib.HexEncode(b, lib.HexEncodeLower); got != "" {
|
||||
t.Fatalf("empty lower: expected empty string, got %q", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "" {
|
||||
t.Fatalf("empty upper: expected empty string, got %q", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "" {
|
||||
t.Fatalf("empty colon lower: expected empty string, got %q", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "" {
|
||||
t.Fatalf("empty colon upper: expected empty string, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexEncode_SingleByte(t *testing.T) {
|
||||
b := []byte{0x0f}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeLower); got != "0f" {
|
||||
t.Fatalf("single byte lower: expected %q, got %q", "0f", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "0F" {
|
||||
t.Fatalf("single byte upper: expected %q, got %q", "0F", got)
|
||||
}
|
||||
// For a single byte, colon modes should not introduce separators
|
||||
if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "0f" {
|
||||
t.Fatalf("single byte colon lower: expected %q, got %q", "0f", got)
|
||||
}
|
||||
if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "0F" {
|
||||
t.Fatalf("single byte colon upper: expected %q, got %q", "0F", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexEncode_InvalidModePanics(t *testing.T) {
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Fatalf("expected panic for invalid mode, but function returned normally")
|
||||
}
|
||||
}()
|
||||
// 0 is not a valid lib.HexEncodeMode (valid modes start at 1)
|
||||
_ = lib.HexEncode([]byte{0x01}, lib.HexEncodeMode(0))
|
||||
}
|
||||
33
twofactor/README.md
Normal file
@@ -0,0 +1,33 @@
|
||||
## `twofactor`
|
||||
|
||||
[GoDoc](https://godoc.org/github.com/gokyle/twofactor)
|
||||
|
||||
|
||||
### Author
|
||||
|
||||
`twofactor` was written by Kyle Isom <kyle@tyrfingr.is>.
|
||||
|
||||
|
||||
### License
|
||||
|
||||
```
|
||||
Copyright (c) 2017 Kyle Isom <kyle@imap.cc>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
```
|
||||
5
twofactor/doc.go
Normal file
@@ -0,0 +1,5 @@
|
||||
// Package twofactor implements two-factor authentication.
|
||||
//
|
||||
// Currently supported are RFC 4226 HOTP one-time passwords and
|
||||
// RFC 6238 TOTP SHA-1 one-time passwords.
|
||||
package twofactor
|
||||
103
twofactor/hotp.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package twofactor
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/sha1" // #nosec G505 - required by RFC
|
||||
"encoding/base32"
|
||||
"io"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HOTP represents an RFC-4226 Hash-based One Time Password instance.
|
||||
type HOTP struct {
|
||||
*OATH
|
||||
}
|
||||
|
||||
// NewHOTP takes the key, the initial counter value, and the number
|
||||
// of digits (typically 6 or 8) and returns a new HOTP instance.
|
||||
func NewHOTP(key []byte, counter uint64, digits int) *HOTP {
|
||||
return &HOTP{
|
||||
OATH: &OATH{
|
||||
key: key,
|
||||
counter: counter,
|
||||
size: digits,
|
||||
hash: sha1.New,
|
||||
algo: crypto.SHA1,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Type returns OATH_HOTP.
|
||||
func (otp *HOTP) Type() Type {
|
||||
return OATH_HOTP
|
||||
}
|
||||
|
||||
// OTP returns the next OTP and increments the counter.
|
||||
func (otp *HOTP) OTP() string {
|
||||
code := otp.OATH.OTP(otp.counter)
|
||||
otp.counter++
|
||||
return code
|
||||
}
|
||||
|
||||
// URL returns an HOTP URL (i.e. for putting in a QR code).
|
||||
func (otp *HOTP) URL(label string) string {
|
||||
return otp.OATH.URL(otp.Type(), label)
|
||||
}
|
||||
|
||||
// SetProvider sets up the provider component of the OTP URL.
|
||||
func (otp *HOTP) SetProvider(provider string) {
|
||||
otp.provider = provider
|
||||
}
|
||||
|
||||
// GenerateGoogleHOTP generates a new HOTP instance as used by
|
||||
// Google Authenticator.
|
||||
func GenerateGoogleHOTP() *HOTP {
|
||||
key := make([]byte, sha1.Size)
|
||||
if _, err := io.ReadFull(PRNG, key); err != nil {
|
||||
return nil
|
||||
}
|
||||
return NewHOTP(key, 0, 6)
|
||||
}
|
||||
|
||||
func hotpFromURL(u *url.URL) (*HOTP, string, error) {
|
||||
label := u.Path[1:]
|
||||
v := u.Query()
|
||||
|
||||
secret := strings.ToUpper(v.Get("secret"))
|
||||
if secret == "" {
|
||||
return nil, "", ErrInvalidURL
|
||||
}
|
||||
|
||||
var digits = 6
|
||||
if sdigit := v.Get("digits"); sdigit != "" {
|
||||
tmpDigits, err := strconv.ParseInt(sdigit, 10, 8)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
digits = int(tmpDigits)
|
||||
}
|
||||
|
||||
var counter uint64
|
||||
if scounter := v.Get("counter"); scounter != "" {
|
||||
var err error
|
||||
counter, err = strconv.ParseUint(scounter, 10, 64)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
}
|
||||
|
||||
key, err := base32.StdEncoding.DecodeString(Pad(secret))
|
||||
if err != nil {
|
||||
// assume secret isn't base32 encoded
|
||||
key = []byte(secret)
|
||||
}
|
||||
otp := NewHOTP(key, counter, digits)
|
||||
return otp, label, nil
|
||||
}
|
||||
|
||||
// QR generates a new QR code for the HOTP.
|
||||
func (otp *HOTP) QR(label string) ([]byte, error) {
|
||||
return otp.OATH.QR(otp.Type(), label)
|
||||
}
|
||||
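To illustrate the counter behaviour described above (each call to `OTP` consumes one counter value), here is a hedged sketch using the RFC 4226 test key, so the first two codes line up with the vectors exercised by `hotp_internal_test.go` below; the label is illustrative.

```go
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/twofactor"
)

func main() {
	// The RFC 4226 test key; the expected codes below come from the RFC's
	// test vectors (also used by hotp_internal_test.go).
	otp := twofactor.NewHOTP([]byte("12345678901234567890"), 0, 6)

	fmt.Println(otp.OTP())     // "755224" for counter 0; the counter is now 1
	fmt.Println(otp.OTP())     // "287082" for counter 1; the counter is now 2
	fmt.Println(otp.Counter()) // 2

	// URL emits an otpauth://hotp/... string suitable for a QR code.
	fmt.Println(otp.URL("alice@example.net"))
}
```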
58
twofactor/hotp_internal_test.go
Normal file
@@ -0,0 +1,58 @@
package twofactor

import (
	"testing"
)

var testKey = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}

var rfcHotpKey = []byte("12345678901234567890")
var rfcHotpExpected = []string{
	"755224",
	"287082",
	"359152",
	"969429",
	"338314",
	"254676",
	"287922",
	"162583",
	"399871",
	"520489",
}

// This test runs through the test cases presented in the RFC, and
// ensures that this implementation is in compliance.
func TestHotpRFC(t *testing.T) {
	otp := NewHOTP(rfcHotpKey, 0, 6)
	for i := range rfcHotpExpected {
		if otp.Counter() != uint64(i) {
			t.Fatalf("twofactor: invalid counter (should be %d, is %d)",
				i, otp.Counter())
		}
		code := otp.OTP()
		if code == "" {
			t.Fatal("twofactor: failed to produce an OTP")
		} else if code != rfcHotpExpected[i] {
			t.Logf("twofactor: invalid OTP\n")
			t.Logf("\tExpected: %s\n", rfcHotpExpected[i])
			t.Logf("\t Actual: %s\n", code)
			t.Fatalf("\t Counter: %d\n", otp.counter)
		}
	}
}

// This test uses a different key than the test cases in the RFC,
// but runs through the same test cases to ensure that they fail as
// expected.
func TestHotpBadRFC(t *testing.T) {
	otp := NewHOTP(testKey, 0, 6)
	for i := range rfcHotpExpected {
		code := otp.OTP()
		switch code {
		case "":
			t.Error("twofactor: failed to produce an OTP")
		case rfcHotpExpected[i]:
			t.Error("twofactor: should not have received a valid OTP")
		}
	}
}
150
twofactor/oath.go
Normal file
@@ -0,0 +1,150 @@
package twofactor

import (
	"crypto"
	"crypto/hmac"
	"encoding/base32"
	"encoding/binary"
	"fmt"
	"hash"
	"net/url"
	"strconv"

	"rsc.io/qr"
)

const defaultSize = 6

// OATH provides a baseline structure for the two OATH algorithms.
type OATH struct {
	key      []byte
	counter  uint64
	size     int
	hash     func() hash.Hash
	algo     crypto.Hash
	provider string
}

// Size returns the output size (in characters) of the password.
func (o *OATH) Size() int {
	return o.size
}

// Counter returns the OATH token's counter.
func (o *OATH) Counter() uint64 {
	return o.counter
}

// SetCounter updates the OATH token's counter to a new value.
func (o *OATH) SetCounter(counter uint64) {
	o.counter = counter
}

// Key returns the token's secret key.
func (o *OATH) Key() []byte {
	return o.key
}

// Hash returns the token's hash function.
func (o *OATH) Hash() func() hash.Hash {
	return o.hash
}

// URL constructs a URL appropriate for the token (i.e. for use in a
// QR code).
func (o *OATH) URL(t Type, label string) string {
	secret := base32.StdEncoding.EncodeToString(o.key)
	u := url.URL{}
	v := url.Values{}
	u.Scheme = "otpauth"
	switch t {
	case OATH_HOTP:
		u.Host = "hotp"
	case OATH_TOTP:
		u.Host = "totp"
	}
	u.Path = label
	v.Add("secret", secret)
	if o.Counter() != 0 && t == OATH_HOTP {
		v.Add("counter", strconv.FormatUint(o.Counter(), 10))
	}
	if o.Size() != defaultSize {
		v.Add("digits", strconv.Itoa(o.Size()))
	}

	switch o.algo {
	case crypto.SHA256:
		v.Add("algorithm", "SHA256")
	case crypto.SHA512:
		v.Add("algorithm", "SHA512")
	}

	if o.provider != "" {
		v.Add("provider", o.provider)
	}

	u.RawQuery = v.Encode()
	return u.String()
}

var digits = []int64{
	0:  1,
	1:  10,
	2:  100,
	3:  1000,
	4:  10000,
	5:  100000,
	6:  1000000,
	7:  10000000,
	8:  100000000,
	9:  1000000000,
	10: 10000000000,
}

// OTP computes the one-time password for the given counter value. The
// top-level type provides the counter; for example, HOTP provides its
// counter directly while TOTP provides the time-stepped counter.
func (o *OATH) OTP(counter uint64) string {
	var ctr [8]byte
	binary.BigEndian.PutUint64(ctr[:], counter)

	var mod int64 = 1
	if len(digits) > o.size {
		for i := 1; i <= o.size; i++ {
			mod *= 10
		}
	} else {
		mod = digits[o.size]
	}

	h := hmac.New(o.hash, o.key)
	h.Write(ctr[:])
	dt := truncate(h.Sum(nil)) % mod
	fmtStr := fmt.Sprintf("%%0%dd", o.size)
	return fmt.Sprintf(fmtStr, dt)
}

// truncate contains the DT function from the RFC; this is used to
// deterministically select a sequence of 4 bytes from the HMAC
// counter hash.
func truncate(in []byte) int64 {
	offset := int(in[len(in)-1] & 0xF)
	p := in[offset : offset+4]
	var binCode int32
	binCode = int32((p[0] & 0x7f)) << 24
	binCode += int32((p[1] & 0xff)) << 16
	binCode += int32((p[2] & 0xff)) << 8
	binCode += int32((p[3] & 0xff))
	return int64(binCode) & 0x7FFFFFFF
}

// QR generates a byte slice containing a QR code encoded as a
// PNG with level Q error correction.
func (o *OATH) QR(t Type, label string) ([]byte, error) {
	u := o.URL(t, label)
	code, err := qr.Encode(u, qr.Q)
	if err != nil {
		return nil, err
	}
	return code.PNG(), nil
}
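To make the dynamic-truncation step concrete, here is a self-contained sketch that repeats the same arithmetic (`truncate` itself is unexported) on the sample HMAC from RFC 4226, section 5.4, which is also the fixture used by `oath_internal_test.go` below:

```go
package main

import "fmt"

func main() {
	// Sample HMAC-SHA-1 value from RFC 4226, section 5.4 (the same bytes
	// appear as sha1Hmac in oath_internal_test.go).
	hm := []byte{
		0x1f, 0x86, 0x98, 0x69, 0x0e,
		0x02, 0xca, 0x16, 0x61, 0x85,
		0x50, 0xef, 0x7f, 0x19, 0xda,
		0x8e, 0x94, 0x5b, 0x55, 0x5a,
	}

	// Dynamic truncation: the low nibble of the last byte picks the offset,
	// four bytes are read from there, and the top bit is masked off.
	offset := int(hm[len(hm)-1] & 0xF) // 0x5a & 0xF == 10
	p := hm[offset : offset+4]         // 0x50 0xef 0x7f 0x19

	code := int64(p[0]&0x7f)<<24 | int64(p[1])<<16 | int64(p[2])<<8 | int64(p[3])

	fmt.Printf("DT value:    %#x\n", code)          // 0x50ef7f19
	fmt.Printf("6-digit OTP: %06d\n", code%1000000) // 872921, as in the RFC
}
```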
27
twofactor/oath_internal_test.go
Normal file
@@ -0,0 +1,27 @@
package twofactor

import (
	"testing"
)

var sha1Hmac = []byte{
	0x1f, 0x86, 0x98, 0x69, 0x0e,
	0x02, 0xca, 0x16, 0x61, 0x85,
	0x50, 0xef, 0x7f, 0x19, 0xda,
	0x8e, 0x94, 0x5b, 0x55, 0x5a,
}

var truncExpect int64 = 0x50ef7f19

// This test runs through the truncation example given in the RFC.
func TestTruncate(t *testing.T) {
	if result := truncate(sha1Hmac); result != truncExpect {
		t.Fatalf("hotp: expected truncate -> %d, saw %d\n",
			truncExpect, result)
	}

	sha1Hmac[19]++
	if result := truncate(sha1Hmac); result == truncExpect {
		t.Fatal("hotp: expected truncation to fail")
	}
}
86
twofactor/otp.go
Normal file
@@ -0,0 +1,86 @@
package twofactor

import (
	"crypto/rand"
	"errors"
	"fmt"
	"hash"
	"net/url"
)

type Type uint

const (
	OATH_HOTP = iota
	OATH_TOTP
)

// PRNG is an io.Reader that provides a cryptographically secure
// random byte stream.
var PRNG = rand.Reader

var (
	ErrInvalidURL  = errors.New("twofactor: invalid URL")
	ErrInvalidAlgo = errors.New("twofactor: invalid algorithm")
)

// OTP represents a one-time password token -- whether a
// software token (as in the case of Google Authenticator) or a
// hardware token (as in the case of a YubiKey).
type OTP interface {
	// Returns the current counter value; the meaning of the
	// returned value is algorithm-specific.
	Counter() uint64

	// Set the counter to a specific value.
	SetCounter(uint64)

	// the secret key contained in the OTP
	Key() []byte

	// generate a new OTP
	OTP() string

	// the output size of the OTP
	Size() int

	// the hash function used by the OTP
	Hash() func() hash.Hash

	// Returns the type of this OTP.
	Type() Type
}

func otpString(otp OTP) string {
	var typeName string
	switch otp.Type() {
	case OATH_HOTP:
		typeName = "OATH-HOTP"
	case OATH_TOTP:
		typeName = "OATH-TOTP"
	default:
		typeName = "UNKNOWN"
	}
	return fmt.Sprintf("%s, %d", typeName, otp.Size())
}

// FromURL constructs a new OTP token from a URL string.
func FromURL(otpURL string) (OTP, string, error) {
	u, err := url.Parse(otpURL)
	if err != nil {
		return nil, "", err
	}

	if u.Scheme != "otpauth" {
		return nil, "", ErrInvalidURL
	}

	switch u.Host {
	case "totp":
		return totpFromURL(u)
	case "hotp":
		return hotpFromURL(u)
	default:
		return nil, "", ErrInvalidURL
	}
}
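`FromURL` is the usual entry point when provisioning from a scanned QR code. A hedged sketch follows; the URL and label are illustrative, and the secret is the RFC 4226 test key ("12345678901234567890") in base32:

```go
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/twofactor"
)

func main() {
	const raw = "otpauth://hotp/alice@example.net?secret=GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ"

	otp, label, err := twofactor.FromURL(raw)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(label)     // "alice@example.net"
	fmt.Println(otp.OTP()) // "755224": the first RFC vector, since the counter starts at 0
}
```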
126
twofactor/otp_internal_test.go
Normal file
@@ -0,0 +1,126 @@
package twofactor

import (
	"io"
	"testing"
)

func TestHOTPString(t *testing.T) {
	hotp := NewHOTP(nil, 0, 6)
	hotpString := otpString(hotp)
	if hotpString != "OATH-HOTP, 6" {
		t.Fatal("twofactor: invalid OTP string")
	}
}

// This test generates a new OTP, outputs the URL for that OTP,
// and attempts to parse that URL. It verifies that the two OTPs
// are the same, and that they produce the same output.
func TestURL(t *testing.T) {
	var ident = "testuser@foo"
	otp := NewHOTP(testKey, 0, 6)
	url := otp.URL("testuser@foo")
	otp2, id, err := FromURL(url)
	switch {
	case err != nil:
		t.Fatal("hotp: failed to parse HOTP URL\n")
	case id != ident:
		t.Logf("hotp: bad label\n")
		t.Logf("\texpected: %s\n", ident)
		t.Fatalf("\t actual: %s\n", id)
	case otp2.Counter() != otp.Counter():
		t.Logf("hotp: OTP counters aren't synced\n")
		t.Logf("\toriginal: %d\n", otp.Counter())
		t.Fatalf("\t second: %d\n", otp2.Counter())
	}

	code1 := otp.OTP()
	code2 := otp2.OTP()
	if code1 != code2 {
		t.Logf("hotp: mismatched OTPs\n")
		t.Logf("\texpected: %s\n", code1)
		t.Fatalf("\t actual: %s\n", code2)
	}

	// There's not much we can do to test the QR code, except to
	// ensure it doesn't fail.
	_, err = otp.QR(ident)
	if err != nil {
		t.Fatalf("hotp: failed to generate QR code PNG (%v)\n", err)
	}

	// This should fail because the maximum size of an alphanumeric
	// QR code with the lowest level of error correction should
	// max out at 4296 bytes. 8k may be a bit overkill... but it
	// gets the job done. The value is read from the PRNG to
	// increase the likelihood that the returned data is
	// uncompressible.
	var tooBigIdent = make([]byte, 8192)
	_, err = io.ReadFull(PRNG, tooBigIdent)
	if err != nil {
		t.Fatalf("hotp: failed to read identity (%v)\n", err)
	} else if _, err = otp.QR(string(tooBigIdent)); err == nil {
		t.Fatal("hotp: QR code should fail to encode oversized URL")
	}
}

// This test makes sure we can generate codes for padded and non-padded
// entries.
func TestPaddedURL(t *testing.T) {
	var urlList = []string{
		"otpauth://hotp/?secret=ME",
		"otpauth://hotp/?secret=MEFR",
		"otpauth://hotp/?secret=MFRGG",
		"otpauth://hotp/?secret=MFRGGZA",
		"otpauth://hotp/?secret=a6mryljlbufszudtjdt42nh5by=======",
		"otpauth://hotp/?secret=a6mryljlbufszudtjdt42nh5by",
		"otpauth://hotp/?secret=a6mryljlbufszudtjdt42nh5by%3D%3D%3D%3D%3D%3D%3D",
	}
	var codeList = []string{
		"413198",
		"770938",
		"670717",
		"402378",
		"069864",
		"069864",
		"069864",
	}

	for i := range urlList {
		if o, id, err := FromURL(urlList[i]); err != nil {
			t.Log("hotp: URL should have parsed successfully (id=", id, ")")
			t.Logf("\turl was: %s\n", urlList[i])
			t.Fatalf("\t%s, %s\n", o.OTP(), id)
		} else {
			code2 := o.OTP()
			if code2 != codeList[i] {
				t.Logf("hotp: mismatched OTPs\n")
				t.Logf("\texpected: %s\n", codeList[i])
				t.Fatalf("\t actual: %s\n", code2)
			}
		}
	}
}

// This test attempts a variety of invalid urls against the parser
// to ensure they fail.
func TestBadURL(t *testing.T) {
	var urlList = []string{
		"http://google.com",
		"",
		"-",
		"foo",
		"otpauth:/foo/bar/baz",
		"://",
		"otpauth://hotp/?digits=",
		"otpauth://hotp/?secret=MFRGGZDF&digits=ABCD",
		"otpauth://hotp/?secret=MFRGGZDF&counter=ABCD",
	}

	for i := range urlList {
		if _, _, err := FromURL(urlList[i]); err == nil {
			t.Log("hotp: URL should not have parsed successfully")
			t.Fatalf("\turl was: %s\n", urlList[i])
		}
	}
}
172
twofactor/totp.go
Normal file
@@ -0,0 +1,172 @@
package twofactor

import (
	"crypto"
	"crypto/sha1" // #nosec G505 - required by RFC
	"crypto/sha256"
	"crypto/sha512"
	"encoding/base32"
	"hash"
	"io"
	"net/url"
	"strconv"
	"strings"

	"github.com/benbjohnson/clock"
)

var timeSource = clock.New()

// TOTP represents an RFC 6238 Time-based One-Time Password instance.
type TOTP struct {
	*OATH
	step uint64
}

// NewTOTP takes a new key, a starting time, a step, the number of
// digits of output (typically 6 or 8) and the hash algorithm to
// use, and builds a new OTP.
func NewTOTP(key []byte, start uint64, step uint64, digits int, algo crypto.Hash) *TOTP {
	h := hashFromAlgo(algo)
	if h == nil {
		return nil
	}

	return &TOTP{
		OATH: &OATH{
			key:     key,
			counter: start,
			size:    digits,
			hash:    h,
			algo:    algo,
		},
		step: step,
	}
}

// NewGoogleTOTP takes a secret as a base32-encoded string and
// returns an appropriate Google Authenticator TOTP instance.
func NewGoogleTOTP(secret string) (*TOTP, error) {
	key, err := base32.StdEncoding.DecodeString(secret)
	if err != nil {
		return nil, err
	}
	return NewTOTP(key, 0, 30, 6, crypto.SHA1), nil
}

// NewTOTPSHA1 will build a new TOTP using SHA-1.
func NewTOTPSHA1(key []byte, start uint64, step uint64, digits int) *TOTP {
	return NewTOTP(key, start, step, digits, crypto.SHA1)
}

// Type returns OATH_TOTP.
func (otp *TOTP) Type() Type {
	return OATH_TOTP
}

func (otp *TOTP) otp(counter uint64) string {
	return otp.OATH.OTP(counter)
}

// OTP returns the OTP for the current timestep.
func (otp *TOTP) OTP() string {
	return otp.otp(otp.OTPCounter())
}

// URL returns a TOTP URL (i.e. for putting in a QR code).
func (otp *TOTP) URL(label string) string {
	return otp.OATH.URL(otp.Type(), label)
}

// SetProvider sets up the provider component of the OTP URL.
func (otp *TOTP) SetProvider(provider string) {
	otp.provider = provider
}

func (otp *TOTP) otpCounter(t uint64) uint64 {
	return (t - otp.counter) / otp.step
}

// OTPCounter returns the current time value for the OTP.
func (otp *TOTP) OTPCounter() uint64 {
	return otp.otpCounter(uint64(timeSource.Now().Unix() & 0x7FFFFFFF)) //#nosec G115 - masked out overflow bits
}

func hashFromAlgo(algo crypto.Hash) func() hash.Hash {
	switch algo {
	case crypto.SHA1:
		return sha1.New
	case crypto.SHA256:
		return sha256.New
	case crypto.SHA512:
		return sha512.New
	}
	return nil
}

// GenerateGoogleTOTP produces a new TOTP token with the defaults expected by
// Google Authenticator.
func GenerateGoogleTOTP() *TOTP {
	key := make([]byte, sha1.Size)
	if _, err := io.ReadFull(PRNG, key); err != nil {
		return nil
	}
	return NewTOTP(key, 0, 30, 6, crypto.SHA1)
}

func totpFromURL(u *url.URL) (*TOTP, string, error) {
	label := u.Path[1:]
	v := u.Query()

	secret := strings.ToUpper(v.Get("secret"))
	if secret == "" {
		return nil, "", ErrInvalidURL
	}

	var algo = crypto.SHA1
	if algorithm := v.Get("algorithm"); algorithm != "" {
		switch {
		case strings.EqualFold(algorithm, "SHA256"):
			algo = crypto.SHA256
		case strings.EqualFold(algorithm, "SHA512"):
			algo = crypto.SHA512
		case !strings.EqualFold(algorithm, "SHA1"):
			return nil, "", ErrInvalidAlgo
		}
	}

	var digits = 6
	if sdigit := v.Get("digits"); sdigit != "" {
		tmpDigits, err := strconv.ParseInt(sdigit, 10, 8)
		if err != nil {
			return nil, "", err
		}
		digits = int(tmpDigits)
	}

	var period uint64 = 30
	if speriod := v.Get("period"); speriod != "" {
		var err error
		period, err = strconv.ParseUint(speriod, 10, 64)
		if err != nil {
			return nil, "", err
		}
	}

	key, err := base32.StdEncoding.DecodeString(Pad(secret))
	if err != nil {
		// assume secret isn't base32 encoded
		key = []byte(secret)
	}
	otp := NewTOTP(key, 0, period, digits, algo)
	return otp, label, nil
}

// QR generates a new TOTP QR code.
func (otp *TOTP) QR(label string) ([]byte, error) {
	return otp.OATH.QR(otp.Type(), label)
}

// SetClock replaces the package's time source; it is primarily useful
// for testing.
func SetClock(c clock.Clock) {
	timeSource = c
}
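For completeness, a hedged sketch of provisioning a TOTP from a shared base32 secret; the secret below is the RFC test key and the label is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/twofactor"
)

func main() {
	// NewGoogleTOTP expects a base32 secret and applies Google
	// Authenticator's defaults: SHA-1, a 30-second step, 6 digits.
	// This secret is the RFC test key "12345678901234567890" in base32.
	otp, err := twofactor.NewGoogleTOTP("GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("code for the current 30s window:", otp.OTP())
	fmt.Println("provisioning URL:", otp.URL("alice@example.net"))
}
```

In tests, `SetClock` can swap in a `clock.NewMock()` so the 30-second window is deterministic, as `totp_internal_test.go` below does.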
85
twofactor/totp_internal_test.go
Normal file
@@ -0,0 +1,85 @@
package twofactor

import (
	"crypto"
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

var rfcTotpKey = map[crypto.Hash][]byte{
	crypto.SHA1:   []byte("12345678901234567890"),
	crypto.SHA256: []byte("12345678901234567890123456789012"),
	crypto.SHA512: []byte("1234567890123456789012345678901234567890123456789012345678901234"),
}

var rfcTotpStep uint64 = 30

var rfcTotpTests = []struct {
	Time uint64
	Code string
	T    uint64
	Algo crypto.Hash
}{
	{59, "94287082", 1, crypto.SHA1},
	{59, "46119246", 1, crypto.SHA256},
	{59, "90693936", 1, crypto.SHA512},
	{1111111109, "07081804", 37037036, crypto.SHA1},
	{1111111109, "68084774", 37037036, crypto.SHA256},
	{1111111109, "25091201", 37037036, crypto.SHA512},
	{1111111111, "14050471", 37037037, crypto.SHA1},
	{1111111111, "67062674", 37037037, crypto.SHA256},
	{1111111111, "99943326", 37037037, crypto.SHA512},
	{1234567890, "89005924", 41152263, crypto.SHA1},
	{1234567890, "91819424", 41152263, crypto.SHA256},
	{1234567890, "93441116", 41152263, crypto.SHA512},
	{2000000000, "69279037", 66666666, crypto.SHA1},
	{2000000000, "90698825", 66666666, crypto.SHA256},
	{2000000000, "38618901", 66666666, crypto.SHA512},
	{20000000000, "65353130", 666666666, crypto.SHA1},
	{20000000000, "77737706", 666666666, crypto.SHA256},
	{20000000000, "47863826", 666666666, crypto.SHA512},
}

func TestTotpRFC(t *testing.T) {
	for _, tc := range rfcTotpTests {
		otp := NewTOTP(rfcTotpKey[tc.Algo], 0, rfcTotpStep, 8, tc.Algo)
		if otp.otpCounter(tc.Time) != tc.T {
			t.Logf("twofactor: invalid TOTP (t=%d, h=%d)\n", tc.Time, tc.Algo)
			t.Logf("\texpected: %d\n", tc.T)
			t.Errorf("\t actual: %d\n", otp.otpCounter(tc.Time))
		}

		if code := otp.otp(otp.otpCounter(tc.Time)); code != tc.Code {
			t.Logf("twofactor: invalid TOTP (t=%d, h=%d)\n", tc.Time, tc.Algo)
			t.Logf("\texpected: %s\n", tc.Code)
			t.Errorf("\t actual: %s\n", code)
		}
	}
}

func TestTOTPTime(t *testing.T) {
	otp := GenerateGoogleTOTP()

	testClock := clock.NewMock()
	testClock.Add(2 * time.Minute)
	SetClock(testClock)

	code := otp.OTP()

	testClock.Add(-1 * time.Minute)
	if newCode := otp.OTP(); newCode == code {
		t.Errorf("twofactor: TOTP: previous code %s shouldn't match code %s", newCode, code)
	}

	testClock.Add(2 * time.Minute)
	if newCode := otp.OTP(); newCode == code {
		t.Errorf("twofactor: TOTP: future code %s shouldn't match code %s", newCode, code)
	}

	testClock.Add(-1 * time.Minute)
	if newCode := otp.OTP(); newCode != code {
		t.Errorf("twofactor: TOTP: current code %s should match code %s", newCode, code)
	}
}
16
twofactor/util.go
Normal file
@@ -0,0 +1,16 @@
package twofactor

import (
	"strings"
)

// Pad calculates the number of '='s to add to our encoded string
// to make base32.StdEncoding.DecodeString happy.
func Pad(s string) string {
	if !strings.HasSuffix(s, "=") && len(s)%8 != 0 {
		for len(s)%8 != 0 {
			s += "="
		}
	}
	return s
}
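A hedged sketch of how `Pad` behaves on short and already-aligned inputs; the sample strings are illustrative base32 values:

```go
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/twofactor"
)

func main() {
	// "MFRGG" is base32 for "abc" with its padding stripped; Pad restores
	// '=' characters until the length is a multiple of eight.
	fmt.Println(twofactor.Pad("MFRGG")) // "MFRGG==="

	// An input whose length is already a multiple of eight (or that ends
	// in '=') is returned unchanged.
	fmt.Println(twofactor.Pad("MFRGGZDF")) // "MFRGGZDF"
}
```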
51
twofactor/util_test.go
Normal file
@@ -0,0 +1,51 @@
package twofactor_test

import (
	"encoding/base32"
	"math/rand"
	"strings"
	"testing"

	"git.wntrmute.dev/kyle/goutils/twofactor"
)

const letters = "1234567890!@#$%^&*()abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

func randString() string {
	b := make([]byte, rand.Intn(len(letters)))
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return base32.StdEncoding.EncodeToString(b)
}

func TestPadding(t *testing.T) {
	for range 300 {
		b := randString()
		origEncoding := b
		modEncoding := strings.ReplaceAll(b, "=", "")
		str, err := base32.StdEncoding.DecodeString(origEncoding)
		if err != nil {
			t.Fatal("Can't decode: ", b)
		}

		paddedEncoding := twofactor.Pad(modEncoding)
		if origEncoding != paddedEncoding {
			t.Log("Padding failed:")
			t.Logf("Expected: '%s'", origEncoding)
			t.Fatalf("Got: '%s'", paddedEncoding)
		} else {
			var mstr []byte
			mstr, err = base32.StdEncoding.DecodeString(paddedEncoding)
			if err != nil {
				t.Fatal("Can't decode: ", paddedEncoding)
			}

			if string(mstr) != string(str) {
				t.Log("Re-padding failed:")
				t.Logf("Expected: '%s'", str)
				t.Fatalf("Got: '%s'", mstr)
			}
		}
	}
}