Compare commits

...

23 Commits

Author SHA1 Message Date
bb7749efd1 Update CHANGELOG for v1.13.6. 2025-11-18 14:13:10 -08:00
a3a8115279 goreleaser: remove gitea parts. 2025-11-18 14:12:58 -08:00
8ca8538268 Update CHANGELOG for v1.13.5. 2025-11-18 13:42:44 -08:00
155c49cc5e build: gitea is pulling the repo information from github. 2025-11-18 13:42:28 -08:00
dda9fd9f07 Bump CHANGELOG for v1.13.4. 2025-11-18 13:37:12 -08:00
c251c1e1b5 fix up goreleaser config. 2025-11-18 13:35:47 -08:00
6eb533f79b Tweak goreleaser config. 2025-11-18 13:23:20 -08:00
ea5ffa4828 Update CHANGELOG for v1.13.3. 2025-11-18 13:11:29 -08:00
aa96e47112 update goreleaser config 2025-11-18 13:01:23 -08:00
d34a417dce fixups 2025-11-18 12:37:15 -08:00
d11e0cf9f9 lib: add byte slice output for HexEncode 2025-11-18 11:58:43 -08:00
aad7d68599 cmd/ski: update display mode 2025-11-18 11:46:58 -08:00
4560868688 cmd: switch programs over to certlib.Fetcher. 2025-11-18 11:08:17 -08:00
8d5406256f cmd/certdump: use certlib.Fetcher. 2025-11-17 19:49:42 -08:00
9280e846fa certlib: add Fetcher
Fetcher is an interface and set of functions for retrieving a
certificate (or chain of certificates) from a spec. It will
determine whether the certificate spec is a file or a server,
and fetch accordingly.
2025-11-17 19:48:57 -08:00
0a71661901 CHANGELOG: bump to v1.13.2. 2025-11-17 15:50:51 -08:00
804f53d27d Refactor bundling into separate package. 2025-11-17 15:08:10 -08:00
cfb80355bb Update CHANGELOG for v1.13.1. 2025-11-17 10:08:05 -08:00
77160395a0 Cleaning up a few things. 2025-11-17 10:07:03 -08:00
37d5e04421 Adding Dockerfile 2025-11-17 09:03:43 -08:00
dc54eeacbc Remove cert bundles generated in testdata. 2025-11-17 08:36:31 -08:00
e2a3081ce5 cmd: add certser command. 2025-11-17 07:18:46 -08:00
3149d958f4 cmd: add certser 2025-11-17 06:55:20 -08:00
29 changed files with 1951 additions and 1094 deletions

35
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,35 @@
name: Release
on:
push:
tags:
- 'v*'
workflow_dispatch: {}
permissions:
contents: write
jobs:
goreleaser:
name: GoReleaser
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: true
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: latest
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

4
.gitignore vendored
View File

@@ -1 +1,5 @@
.idea
cmd/cert-bundler/testdata/pkg/*
# Added by goreleaser init:
dist/
cmd/cert-bundler/testdata/bundle/

View File

@@ -390,6 +390,9 @@ linters:
- 3
- 4
- 8
- 24
- 30
- 365
nakedret:
# Make an issue if func has more lines of code than this setting, and it has naked returns.

445
.goreleaser.yaml Normal file
View File

@@ -0,0 +1,445 @@
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com
# The lines below are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/need to use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
version: 2
before:
hooks:
# You may remove this if you don't use go modules.
- go mod tidy
# you may remove this if you don't need go generate
- go generate ./...
builds:
- id: atping
main: ./cmd/atping/main.go
binary: atping
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: ca-signed
main: ./cmd/ca-signed/main.go
binary: ca-signed
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: cert-bundler
main: ./cmd/cert-bundler/main.go
binary: cert-bundler
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: cert-revcheck
main: ./cmd/cert-revcheck/main.go
binary: cert-revcheck
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: certchain
main: ./cmd/certchain/main.go
binary: certchain
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: certdump
main: ./cmd/certdump/main.go
binary: certdump
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: certexpiry
main: ./cmd/certexpiry/main.go
binary: certexpiry
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: certser
main: ./cmd/certser/main.go
binary: certser
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: certverify
main: ./cmd/certverify/main.go
binary: certverify
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: clustersh
main: ./cmd/clustersh/main.go
binary: clustersh
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: cruntar
main: ./cmd/cruntar/main.go
binary: cruntar
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: csrpubdump
main: ./cmd/csrpubdump/main.go
binary: csrpubdump
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: data_sync
main: ./cmd/data_sync/main.go
binary: data_sync
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: diskimg
main: ./cmd/diskimg/main.go
binary: diskimg
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: dumpbytes
main: ./cmd/dumpbytes/main.go
binary: dumpbytes
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: eig
main: ./cmd/eig/main.go
binary: eig
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: fragment
main: ./cmd/fragment/main.go
binary: fragment
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: host
main: ./cmd/host/main.go
binary: host
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: jlp
main: ./cmd/jlp/main.go
binary: jlp
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: kgz
main: ./cmd/kgz/main.go
binary: kgz
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: minmax
main: ./cmd/minmax/main.go
binary: minmax
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: parts
main: ./cmd/parts/main.go
binary: parts
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: pem2bin
main: ./cmd/pem2bin/main.go
binary: pem2bin
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: pembody
main: ./cmd/pembody/main.go
binary: pembody
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: pemit
main: ./cmd/pemit/main.go
binary: pemit
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: readchain
main: ./cmd/readchain/main.go
binary: readchain
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: renfnv
main: ./cmd/renfnv/main.go
binary: renfnv
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: rhash
main: ./cmd/rhash/main.go
binary: rhash
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: rolldie
main: ./cmd/rolldie/main.go
binary: rolldie
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: showimp
main: ./cmd/showimp/main.go
binary: showimp
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: ski
main: ./cmd/ski/main.go
binary: ski
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: sprox
main: ./cmd/sprox/main.go
binary: sprox
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: stealchain
main: ./cmd/stealchain/main.go
binary: stealchain
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: stealchain-server
main: ./cmd/stealchain-server/main.go
binary: stealchain-server
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: subjhash
main: ./cmd/subjhash/main.go
binary: subjhash
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: tlsinfo
main: ./cmd/tlsinfo/main.go
binary: tlsinfo
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: tlskeypair
main: ./cmd/tlskeypair/main.go
binary: tlskeypair
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: utc
main: ./cmd/utc/main.go
binary: utc
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: yamll
main: ./cmd/yamll/main.go
binary: yamll
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
- id: zsearch
main: ./cmd/zsearch/main.go
binary: zsearch
env:
- CGO_ENABLED=0
goos: [linux, darwin]
goarch: [amd64, arm64]
ignore:
- goos: darwin
goarch: amd64
archives:
- formats: [tar.gz]
# archive filename: name_version_os_arch
name_template: >-
{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}
# use zip for windows archives
format_overrides:
- goos: windows
formats: [zip]
changelog:
sort: asc
filters:
exclude:
- "^docs:"
- "^test:"
release:
github:
owner: kisom
name: goutils
footer: >-
---
Released by [GoReleaser](https://github.com/goreleaser/goreleaser).

View File

@@ -1,5 +1,56 @@
CHANGELOG
v1.13.6 - 2025-11-18
Changed:
- build: removing gitea stuff.
v1.13.5 - 2025-11-18
Changed:
- build: updating goreleaser config.
v1.13.4 - 2025-11-18
Changed:
- build: updating goreleaser config.
v1.13.3 - 2025-11-18
Added:
- certlib: introduce `Fetcher` for retrieving certificates.
- lib: `HexEncode` gains a byte-slice output variant.
- build: add GoReleaser configuration.
Changed:
- cmd: migrate programs to use `certlib.Fetcher` for certificate retrieval
(includes `certdump`, `ski`, and others).
- cmd/ski: update display mode.
Misc:
- repository fixups and small cleanups.
v1.13.2 - 2025-11-17
Add:
- certlib/bundler: refactor certificate bundling from cmd/cert-bundler
into a separate package.
Changed:
- cmd/cert-bundler: refactor to use bundler package, and update Dockerfile.
v1.13.1 - 2025-11-17
Add:
- Dockerfile for cert-bundler.
v1.13.0 - 2025-11-16
Add:
- cmd/certser: print serial numbers for certificates.
- lib/HexEncode: add a new hex encode function handling multiple output
formats, including with and without colons.
v1.12.4 - 2025-11-16
Changed:

View File

@@ -91,7 +91,7 @@ func TestReset(t *testing.T) {
}
}
const decay = 25 * time.Millisecond
const decay = time.Second
const maxDuration = 10 * time.Millisecond
const interval = time.Millisecond

677
certlib/bundler/bundler.go Normal file
View File

@@ -0,0 +1,677 @@
package bundler
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
"time"
"gopkg.in/yaml.v2"
"git.wntrmute.dev/kyle/goutils/certlib"
)
const defaultFileMode = 0644
// Config represents the top-level YAML configuration.
type Config struct {
Config struct {
Hashes string `yaml:"hashes"`
Expiry string `yaml:"expiry"`
} `yaml:"config"`
Chains map[string]ChainGroup `yaml:"chains"`
}
// ChainGroup represents a named group of certificate chains.
type ChainGroup struct {
Certs []CertChain `yaml:"certs"`
Outputs Outputs `yaml:"outputs"`
}
// CertChain represents a root certificate and its intermediates.
type CertChain struct {
Root string `yaml:"root"`
Intermediates []string `yaml:"intermediates"`
}
// Outputs defines output format options.
type Outputs struct {
IncludeSingle bool `yaml:"include_single"`
IncludeIndividual bool `yaml:"include_individual"`
Manifest bool `yaml:"manifest"`
Formats []string `yaml:"formats"`
Encoding string `yaml:"encoding"`
}
var formatExtensions = map[string]string{
"zip": ".zip",
"tgz": ".tar.gz",
}
// Run performs the bundling operation given a config file path and an output directory.
func Run(configFile string, outputDir string) error {
if configFile == "" {
return errors.New("configuration file required")
}
cfg, err := loadConfig(configFile)
if err != nil {
return fmt.Errorf("loading config: %w", err)
}
expiryDuration := 365 * 24 * time.Hour
if cfg.Config.Expiry != "" {
expiryDuration, err = parseDuration(cfg.Config.Expiry)
if err != nil {
return fmt.Errorf("parsing expiry: %w", err)
}
}
if err = os.MkdirAll(outputDir, 0750); err != nil {
return fmt.Errorf("creating output directory: %w", err)
}
totalFormats := 0
for _, group := range cfg.Chains {
totalFormats += len(group.Outputs.Formats)
}
createdFiles := make([]string, 0, totalFormats)
for groupName, group := range cfg.Chains {
files, perr := processChainGroup(groupName, group, expiryDuration, outputDir)
if perr != nil {
return fmt.Errorf("processing chain group %s: %w", groupName, perr)
}
createdFiles = append(createdFiles, files...)
}
if cfg.Config.Hashes != "" {
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
return fmt.Errorf("generating hash file: %w", gerr)
}
}
return nil
}
func loadConfig(path string) (*Config, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var cfg Config
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
return nil, uerr
}
return &cfg, nil
}
func parseDuration(s string) (time.Duration, error) {
// Support simple formats like "1y", "6m", "30d"
if len(s) < 2 {
return 0, fmt.Errorf("invalid duration format: %s", s)
}
unit := s[len(s)-1]
value := s[:len(s)-1]
var multiplier time.Duration
switch unit {
case 'y', 'Y':
multiplier = 365 * 24 * time.Hour
case 'm', 'M':
multiplier = 30 * 24 * time.Hour
case 'd', 'D':
multiplier = 24 * time.Hour
default:
return time.ParseDuration(s)
}
var num int
_, err := fmt.Sscanf(value, "%d", &num)
if err != nil {
return 0, fmt.Errorf("invalid duration value: %s", s)
}
return time.Duration(num) * multiplier, nil
}
func processChainGroup(
groupName string,
group ChainGroup,
expiryDuration time.Duration,
outputDir string,
) ([]string, error) {
// Default encoding to "pem" if not specified
encoding := group.Outputs.Encoding
if encoding == "" {
encoding = "pem"
}
// Collect certificates from all chains in the group
singleFileCerts, individualCerts, sourcePaths, err := loadAndCollectCerts(
group.Certs,
group.Outputs,
expiryDuration,
)
if err != nil {
return nil, err
}
// Prepare files for inclusion in archives
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, sourcePaths, group.Outputs, encoding)
if err != nil {
return nil, err
}
// Create archives for the entire group
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles, outputDir)
if err != nil {
return nil, err
}
return createdFiles, nil
}
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
func loadAndCollectCerts(
chains []CertChain,
outputs Outputs,
expiryDuration time.Duration,
) ([]*x509.Certificate, []certWithPath, []string, error) {
var singleFileCerts []*x509.Certificate
var individualCerts []certWithPath
var sourcePaths []string
for _, chain := range chains {
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
if cerr != nil {
return nil, nil, nil, cerr
}
if len(s) > 0 {
singleFileCerts = append(singleFileCerts, s...)
}
if len(i) > 0 {
individualCerts = append(individualCerts, i...)
}
// Record source paths for timestamp preservation
// Only append when loading succeeded
sourcePaths = append(sourcePaths, chain.Root)
sourcePaths = append(sourcePaths, chain.Intermediates...)
}
return singleFileCerts, individualCerts, sourcePaths, nil
}
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
func collectFromChain(
chain CertChain,
outputs Outputs,
expiryDuration time.Duration,
) (
[]*x509.Certificate,
[]certWithPath,
error,
) {
var single []*x509.Certificate
var indiv []certWithPath
// Load root certificate
rootCert, rerr := certlib.LoadCertificate(chain.Root)
if rerr != nil {
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
}
// Check expiry for root
checkExpiry(chain.Root, rootCert, expiryDuration)
// Add root to collections if needed
if outputs.IncludeSingle {
single = append(single, rootCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
}
// Load and validate intermediates
for _, intPath := range chain.Intermediates {
intCert, lerr := certlib.LoadCertificate(intPath)
if lerr != nil {
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
}
// Validate that intermediate is signed by root
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
return nil, nil, fmt.Errorf(
"intermediate %s is not properly signed by root %s: %w",
intPath,
chain.Root,
sigErr,
)
}
// Check expiry for intermediate
checkExpiry(intPath, intCert, expiryDuration)
// Add intermediate to collections if needed
if outputs.IncludeSingle {
single = append(single, intCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
}
}
return single, indiv, nil
}
// prepareArchiveFiles prepares all files to be included in archives.
func prepareArchiveFiles(
singleFileCerts []*x509.Certificate,
individualCerts []certWithPath,
sourcePaths []string,
outputs Outputs,
encoding string,
) ([]fileEntry, error) {
var archiveFiles []fileEntry
// Track used filenames to avoid collisions inside archives
usedNames := make(map[string]int)
// Handle a single bundle file
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
bundleTime := maxModTime(sourcePaths)
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
if err != nil {
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
}
for i := range files {
files[i].name = makeUniqueName(files[i].name, usedNames)
files[i].modTime = bundleTime
// Best-effort: we do not have a portable birth/creation time.
// Use the same timestamp for created time to track deterministically.
files[i].createTime = bundleTime
}
archiveFiles = append(archiveFiles, files...)
}
// Handle individual files
if outputs.IncludeIndividual {
for _, cp := range individualCerts {
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
if err != nil {
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
}
mt := fileModTime(cp.path)
for i := range files {
files[i].name = makeUniqueName(files[i].name, usedNames)
files[i].modTime = mt
files[i].createTime = mt
}
archiveFiles = append(archiveFiles, files...)
}
}
// Generate manifest if requested
if outputs.Manifest {
manifestContent := generateManifest(archiveFiles)
manifestName := makeUniqueName("MANIFEST", usedNames)
mt := maxModTime(sourcePaths)
archiveFiles = append(archiveFiles, fileEntry{
name: manifestName,
content: manifestContent,
modTime: mt,
createTime: mt,
})
}
return archiveFiles, nil
}
// createArchiveFiles creates archive files in the specified formats.
func createArchiveFiles(
groupName string,
formats []string,
archiveFiles []fileEntry,
outputDir string,
) ([]string, error) {
createdFiles := make([]string, 0, len(formats))
for _, format := range formats {
ext, ok := formatExtensions[format]
if !ok {
return nil, fmt.Errorf("unsupported format: %s", format)
}
archivePath := filepath.Join(outputDir, groupName+ext)
switch format {
case "zip":
if err := createZipArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create zip archive: %w", err)
}
case "tgz":
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
}
default:
return nil, fmt.Errorf("unsupported format: %s", format)
}
createdFiles = append(createdFiles, archivePath)
}
return createdFiles, nil
}
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
now := time.Now()
expiryThreshold := now.Add(expiryDuration)
if cert.NotAfter.Before(expiryThreshold) {
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
if daysUntilExpiry < 0 {
fmt.Fprintf(
os.Stderr,
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
path,
-daysUntilExpiry,
)
} else {
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
}
}
}
type fileEntry struct {
name string
content []byte
modTime time.Time
createTime time.Time
}
type certWithPath struct {
cert *x509.Certificate
path string
}
// encodeCertsToFiles converts certificates to file entries based on encoding type
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
func encodeCertsToFiles(
certs []*x509.Certificate,
baseName string,
encoding string,
isSingle bool,
) ([]fileEntry, error) {
var files []fileEntry
switch encoding {
case "pem":
pemContent := encodeCertsToPEM(certs)
files = append(files, fileEntry{
name: baseName + ".pem",
content: pemContent,
})
case "der":
if isSingle {
// For single file in DER, concatenate all cert DER bytes
var derContent []byte
for _, cert := range certs {
derContent = append(derContent, cert.Raw...)
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: derContent,
})
} else if len(certs) > 0 {
// Individual DER file (should only have one cert)
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
case "both":
// Add PEM version
pemContent := encodeCertsToPEM(certs)
files = append(files, fileEntry{
name: baseName + ".pem",
content: pemContent,
})
// Add DER version
if isSingle {
var derContent []byte
for _, cert := range certs {
derContent = append(derContent, cert.Raw...)
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: derContent,
})
} else if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
default:
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
}
return files, nil
}
// encodeCertsToPEM encodes certificates to PEM format.
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
var pemContent []byte
for _, cert := range certs {
pemBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
}
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
}
return pemContent
}
func generateManifest(files []fileEntry) []byte {
// Build a sorted list of files by filename to ensure deterministic manifest ordering
sorted := make([]fileEntry, 0, len(files))
for _, f := range files {
// Defensive: skip any existing manifest entry
if f.name == "MANIFEST" {
continue
}
sorted = append(sorted, f)
}
sort.Slice(sorted, func(i, j int) bool { return sorted[i].name < sorted[j].name })
var manifest strings.Builder
for _, file := range sorted {
hash := sha256.Sum256(file.content)
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
}
return []byte(manifest.String())
}
// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
func closeWithErr(baseErr error, closers ...io.Closer) error {
for _, c := range closers {
if c == nil {
continue
}
if cerr := c.Close(); cerr != nil {
baseErr = errors.Join(baseErr, cerr)
}
}
return baseErr
}
func createZipArchive(path string, files []fileEntry) error {
f, zerr := os.Create(path)
if zerr != nil {
return zerr
}
w := zip.NewWriter(f)
for _, file := range files {
hdr := &zip.FileHeader{
Name: file.name,
Method: zip.Deflate,
}
if !file.modTime.IsZero() {
hdr.SetModTime(file.modTime)
}
fw, werr := w.CreateHeader(hdr)
if werr != nil {
return closeWithErr(werr, w, f)
}
if _, werr = fw.Write(file.content); werr != nil {
return closeWithErr(werr, w, f)
}
}
// Check errors on close operations
if cerr := w.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func createTarGzArchive(path string, files []fileEntry) error {
f, terr := os.Create(path)
if terr != nil {
return terr
}
gw := gzip.NewWriter(f)
tw := tar.NewWriter(gw)
for _, file := range files {
hdr := &tar.Header{
Name: file.name,
Uid: 0,
Gid: 0,
Mode: defaultFileMode,
Size: int64(len(file.content)),
ModTime: func() time.Time {
if file.modTime.IsZero() {
return time.Now()
}
return file.modTime
}(),
}
// Set additional times if supported
hdr.AccessTime = hdr.ModTime
if !file.createTime.IsZero() {
hdr.ChangeTime = file.createTime
} else {
hdr.ChangeTime = hdr.ModTime
}
if herr := tw.WriteHeader(hdr); herr != nil {
return closeWithErr(herr, tw, gw, f)
}
if _, werr := tw.Write(file.content); werr != nil {
return closeWithErr(werr, tw, gw, f)
}
}
// Check errors on close operations in the correct order
if cerr := tw.Close(); cerr != nil {
_ = gw.Close()
_ = f.Close()
return cerr
}
if cerr := gw.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func generateHashFile(path string, files []string) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
for _, file := range files {
data, rerr := os.ReadFile(file)
if rerr != nil {
return rerr
}
hash := sha256.Sum256(data)
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
}
return nil
}
// makeUniqueName ensures that each file name within the archive is unique by appending
// an incremental numeric suffix before the extension when collisions occur.
// Example: "root.pem" -> "root-2.pem", "root-3.pem", etc.
func makeUniqueName(name string, used map[string]int) string {
// If unused, mark and return as-is
if _, ok := used[name]; !ok {
used[name] = 1
return name
}
ext := filepath.Ext(name)
base := strings.TrimSuffix(name, ext)
// Track a counter per base+ext key
key := base + ext
counter := max(used[key], 1)
for {
counter++
candidate := fmt.Sprintf("%s-%d%s", base, counter, ext)
if _, exists := used[candidate]; !exists {
used[key] = counter
used[candidate] = 1
return candidate
}
}
}
// fileModTime returns the file's modification time, or time.Now() if stat fails.
func fileModTime(path string) time.Time {
fi, err := os.Stat(path)
if err != nil {
return time.Now()
}
return fi.ModTime()
}
// maxModTime returns the latest modification time across provided paths.
// If the list is empty or stats fail, returns time.Now().
func maxModTime(paths []string) time.Time {
var zero time.Time
maxTime := zero
for _, p := range paths {
fi, err := os.Stat(p)
if err != nil {
continue
}
mt := fi.ModTime()
if maxTime.IsZero() || mt.After(maxTime) {
maxTime = mt
}
}
if maxTime.IsZero() {
return time.Now()
}
return maxTime
}
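
For orientation, a minimal sketch (not part of this change) of driving the extracted package directly from Go, mirroring what cmd/cert-bundler now does; the bundle.yaml and output paths below are placeholder assumptions.

``` go
// Minimal sketch of using certlib/bundler directly; paths are placeholders.
package main

import (
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib/bundler"
)

func main() {
	// Run loads the YAML config, verifies each chain, writes the requested
	// archives into the output directory, and emits the optional hash file.
	if err := bundler.Run("bundle.yaml", "bundle"); err != nil {
		log.Fatalf("bundling failed: %v", err)
	}
}
```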

175
certlib/fetch.go Normal file
View File

@@ -0,0 +1,175 @@
package certlib
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"net"
"os"
"git.wntrmute.dev/kyle/goutils/certlib/hosts"
"git.wntrmute.dev/kyle/goutils/fileutil"
"git.wntrmute.dev/kyle/goutils/lib"
)
// FetcherOpts are options for fetching certificates. They are only applicable to ServerFetcher.
type FetcherOpts struct {
SkipVerify bool
Roots *x509.CertPool
}
// Fetcher is an interface for fetching certificates from a remote source. It
// currently supports fetching from a server or a file.
type Fetcher interface {
Get() (*x509.Certificate, error)
GetChain() ([]*x509.Certificate, error)
String() string
}
type ServerFetcher struct {
host string
port int
insecure bool
roots *x509.CertPool
}
// WithRoots sets the roots for the ServerFetcher.
func WithRoots(roots *x509.CertPool) func(*ServerFetcher) {
return func(sf *ServerFetcher) {
sf.roots = roots
}
}
// WithSkipVerify sets the insecure flag for the ServerFetcher.
func WithSkipVerify() func(*ServerFetcher) {
return func(sf *ServerFetcher) {
sf.insecure = true
}
}
// ParseServer parses a server string into a ServerFetcher. It can be a URL or
// a host:port pair.
func ParseServer(host string) (*ServerFetcher, error) {
target, err := hosts.ParseHost(host)
if err != nil {
return nil, fmt.Errorf("failed to parse server: %w", err)
}
return &ServerFetcher{
host: target.Host,
port: target.Port,
}, nil
}
func (sf *ServerFetcher) String() string {
return fmt.Sprintf("tls://%s", net.JoinHostPort(sf.host, lib.Itoa(sf.port, -1)))
}
func (sf *ServerFetcher) GetChain() ([]*x509.Certificate, error) {
config := &tls.Config{
InsecureSkipVerify: sf.insecure, // #nosec G402 - no shit sherlock
RootCAs: sf.roots,
}
dialer := &tls.Dialer{
Config: config,
}
hostSpec := net.JoinHostPort(sf.host, lib.Itoa(sf.port, -1))
netConn, err := dialer.DialContext(context.Background(), "tcp", hostSpec)
if err != nil {
return nil, fmt.Errorf("dialing server: %w", err)
}
conn, ok := netConn.(*tls.Conn)
if !ok {
return nil, errors.New("connection is not TLS")
}
defer conn.Close()
state := conn.ConnectionState()
return state.PeerCertificates, nil
}
func (sf *ServerFetcher) Get() (*x509.Certificate, error) {
certs, err := sf.GetChain()
if err != nil {
return nil, err
}
return certs[0], nil
}
type FileFetcher struct {
path string
}
func NewFileFetcher(path string) *FileFetcher {
return &FileFetcher{
path: path,
}
}
func (ff *FileFetcher) String() string {
return ff.path
}
func (ff *FileFetcher) GetChain() ([]*x509.Certificate, error) {
if ff.path == "-" {
certData, err := io.ReadAll(os.Stdin)
if err != nil {
return nil, fmt.Errorf("failed to read from stdin: %w", err)
}
return ParseCertificatesPEM(certData)
}
certs, err := LoadCertificates(ff.path)
if err != nil {
return nil, fmt.Errorf("failed to load chain: %w", err)
}
return certs, nil
}
func (ff *FileFetcher) Get() (*x509.Certificate, error) {
certs, err := ff.GetChain()
if err != nil {
return nil, err
}
return certs[0], nil
}
// GetCertificateChain fetches a certificate chain from a remote source.
func GetCertificateChain(spec string, opts *FetcherOpts) ([]*x509.Certificate, error) {
if fileutil.FileDoesExist(spec) {
return NewFileFetcher(spec).GetChain()
}
fetcher, err := ParseServer(spec)
if err != nil {
return nil, err
}
if opts != nil {
fetcher.insecure = opts.SkipVerify
fetcher.roots = opts.Roots
}
return fetcher.GetChain()
}
// GetCertificate fetches the first certificate from a certificate chain.
func GetCertificate(spec string, opts *FetcherOpts) (*x509.Certificate, error) {
certs, err := GetCertificateChain(spec, opts)
if err != nil {
return nil, err
}
return certs[0], nil
}
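
As a quick illustration (assumed usage, not part of the diff), a caller can hand GetCertificateChain either a file path or a server spec; the host below is a placeholder.

``` go
// Sketch of fetching a chain via the new helpers; the host is a placeholder.
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/certlib"
)

func main() {
	// The spec may be a certificate file on disk or a server such as
	// "host:port" or an https:// URL; FetcherOpts only applies when a
	// server is dialed.
	chain, err := certlib.GetCertificateChain("example.com:443", &certlib.FetcherOpts{
		SkipVerify: false,
	})
	if err != nil {
		log.Fatalf("fetching chain: %v", err)
	}
	for _, cert := range chain {
		fmt.Println(cert.Subject.CommonName)
	}
}
```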

View File

@@ -396,6 +396,45 @@ func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, e
return certs, rest, nil
}
// LoadFullCertPool returns a certificate pool with roots and intermediates
// from disk. If no roots are provided, the system root pool will be used.
func LoadFullCertPool(roots, intermediates string) (*x509.CertPool, error) {
var err error
pool := x509.NewCertPool()
if roots == "" {
pool, err = x509.SystemCertPool()
if err != nil {
return nil, fmt.Errorf("loading system cert pool: %w", err)
}
} else {
var rootCerts []*x509.Certificate
rootCerts, err = LoadCertificates(roots)
if err != nil {
return nil, fmt.Errorf("loading roots: %w", err)
}
for _, cert := range rootCerts {
pool.AddCert(cert)
}
}
if intermediates != "" {
var intCerts []*x509.Certificate
intCerts, err = LoadCertificates(intermediates)
if err != nil {
return nil, fmt.Errorf("loading intermediates: %w", err)
}
for _, cert := range intCerts {
pool.AddCert(cert)
}
}
return pool, nil
}
// LoadPEMCertPool loads a pool of PEM certificates from file.
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
if certsFile == "" {

View File

@@ -26,7 +26,12 @@ func parseURL(host string) (string, int, error) {
return "", 0, fmt.Errorf("certlib/hosts: invalid host: %s", host)
}
if strings.ToLower(url.Scheme) != "https" {
switch strings.ToLower(url.Scheme) {
case "https":
// OK
case "tls":
// OK
default:
return "", 0, errors.New("certlib/hosts: only https scheme supported")
}
@@ -43,28 +48,28 @@ func parseURL(host string) (string, int, error) {
}
func parseHostPort(host string) (string, int, error) {
host, sport, err := net.SplitHostPort(host)
shost, sport, err := net.SplitHostPort(host)
if err == nil {
portInt, err2 := strconv.ParseInt(sport, 10, 16)
if err2 != nil {
return "", 0, fmt.Errorf("certlib/hosts: invalid port: %s", sport)
}
return host, int(portInt), nil
return shost, int(portInt), nil
}
return host, defaultHTTPSPort, nil
}
func ParseHost(host string) (*Target, error) {
host, port, err := parseURL(host)
uhost, port, err := parseURL(host)
if err == nil {
return &Target{Host: host, Port: port}, nil
return &Target{Host: uhost, Port: port}, nil
}
host, port, err = parseHostPort(host)
shost, port, err := parseHostPort(host)
if err == nil {
return &Target{Host: host, Port: port}, nil
return &Target{Host: shost, Port: port}, nil
}
return nil, fmt.Errorf("certlib/hosts: invalid host: %s", host)

View File

@@ -0,0 +1,35 @@
package hosts_test
import (
"testing"
"git.wntrmute.dev/kyle/goutils/certlib/hosts"
)
type testCase struct {
Host string
Target hosts.Target
}
var testCases = []testCase{
{Host: "server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
{Host: "server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
{Host: "tls://server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
{Host: "https://server-name", Target: hosts.Target{Host: "server-name", Port: 443}},
{Host: "https://server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
{Host: "tls://server-name:8443", Target: hosts.Target{Host: "server-name", Port: 8443}},
{Host: "https://server-name/something/else", Target: hosts.Target{Host: "server-name", Port: 443}},
}
func TestParseHost(t *testing.T) {
for i, tc := range testCases {
target, err := hosts.ParseHost(tc.Host)
if err != nil {
t.Fatalf("test case %d: %s", i+1, err)
}
if target.Host != tc.Target.Host {
t.Fatalf("test case %d: got host '%s', want host '%s'", i+1, target.Host, tc.Target.Host)
}
}
}

View File

@@ -0,0 +1,28 @@
# Build and runtime image for cert-bundler
# Usage (from repo root or cmd/cert-bundler directory):
# docker build -t cert-bundler:latest -f cmd/cert-bundler/Dockerfile .
# docker run --rm -v "$PWD":/work cert-bundler:latest
# This expects a /work/bundle.yaml file in the mounted directory and
# will write generated bundles to /work/bundle.
# Build stage
FROM golang:1.24.3-alpine AS build
WORKDIR /src
# Install the pinned cert-bundler release from the module proxy
RUN go install git.wntrmute.dev/kyle/goutils/cmd/cert-bundler@v1.13.2 && \
mv /go/bin/cert-bundler /usr/local/bin/cert-bundler
# Runtime stage (kept as golang:alpine per requirement)
FROM golang:1.24.3-alpine
# Create a work directory that users will typically mount into
WORKDIR /work
VOLUME ["/work"]
# Copy the built binary from the builder stage
COPY --from=build /usr/local/bin/cert-bundler /usr/local/bin/cert-bundler
# Default command: read bundle.yaml from current directory and output to ./bundle
ENTRYPOINT ["/usr/local/bin/cert-bundler"]
CMD ["-c", "/work/bundle.yaml", "-o", "/work/bundle"]

View File

@@ -1,66 +1,19 @@
package main
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"crypto/sha256"
"crypto/x509"
_ "embed"
"encoding/pem"
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"git.wntrmute.dev/kyle/goutils/certlib"
"gopkg.in/yaml.v2"
"git.wntrmute.dev/kyle/goutils/certlib/bundler"
)
// Config represents the top-level YAML configuration.
type Config struct {
Config struct {
Hashes string `yaml:"hashes"`
Expiry string `yaml:"expiry"`
} `yaml:"config"`
Chains map[string]ChainGroup `yaml:"chains"`
}
// ChainGroup represents a named group of certificate chains.
type ChainGroup struct {
Certs []CertChain `yaml:"certs"`
Outputs Outputs `yaml:"outputs"`
}
// CertChain represents a root certificate and its intermediates.
type CertChain struct {
Root string `yaml:"root"`
Intermediates []string `yaml:"intermediates"`
}
// Outputs defines output format options.
type Outputs struct {
IncludeSingle bool `yaml:"include_single"`
IncludeIndividual bool `yaml:"include_individual"`
Manifest bool `yaml:"manifest"`
Formats []string `yaml:"formats"`
Encoding string `yaml:"encoding"`
}
var (
configFile string
outputDir string
)
var formatExtensions = map[string]string{
"zip": ".zip",
"tgz": ".tar.gz",
}
//go:embed README.txt
var readmeContent string
@@ -79,497 +32,10 @@ func main() {
os.Exit(1)
}
// Load and parse configuration
cfg, err := loadConfig(configFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Error loading config: %v\n", err)
if err := bundler.Run(configFile, outputDir); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Parse expiry duration (default 1 year)
expiryDuration := 365 * 24 * time.Hour
if cfg.Config.Expiry != "" {
expiryDuration, err = parseDuration(cfg.Config.Expiry)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing expiry: %v\n", err)
os.Exit(1)
}
}
// Create output directory if it doesn't exist
err = os.MkdirAll(outputDir, 0750)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
os.Exit(1)
}
// Process each chain group
// Pre-allocate createdFiles based on total number of formats across all groups
totalFormats := 0
for _, group := range cfg.Chains {
totalFormats += len(group.Outputs.Formats)
}
createdFiles := make([]string, 0, totalFormats)
for groupName, group := range cfg.Chains {
files, perr := processChainGroup(groupName, group, expiryDuration)
if perr != nil {
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, perr)
os.Exit(1)
}
createdFiles = append(createdFiles, files...)
}
// Generate hash file for all created archives
if cfg.Config.Hashes != "" {
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", gerr)
os.Exit(1)
}
}
fmt.Println("Certificate bundling completed successfully")
}
func loadConfig(path string) (*Config, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var cfg Config
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
return nil, uerr
}
return &cfg, nil
}
func parseDuration(s string) (time.Duration, error) {
// Support simple formats like "1y", "6m", "30d"
if len(s) < 2 {
return 0, fmt.Errorf("invalid duration format: %s", s)
}
unit := s[len(s)-1]
value := s[:len(s)-1]
var multiplier time.Duration
switch unit {
case 'y', 'Y':
multiplier = 365 * 24 * time.Hour
case 'm', 'M':
multiplier = 30 * 24 * time.Hour
case 'd', 'D':
multiplier = 24 * time.Hour
default:
return time.ParseDuration(s)
}
var num int
_, err := fmt.Sscanf(value, "%d", &num)
if err != nil {
return 0, fmt.Errorf("invalid duration value: %s", s)
}
return time.Duration(num) * multiplier, nil
}
func processChainGroup(groupName string, group ChainGroup, expiryDuration time.Duration) ([]string, error) {
// Default encoding to "pem" if not specified
encoding := group.Outputs.Encoding
if encoding == "" {
encoding = "pem"
}
// Collect certificates from all chains in the group
singleFileCerts, individualCerts, err := loadAndCollectCerts(group.Certs, group.Outputs, expiryDuration)
if err != nil {
return nil, err
}
// Prepare files for inclusion in archives
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, group.Outputs, encoding)
if err != nil {
return nil, err
}
// Create archives for the entire group
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles)
if err != nil {
return nil, err
}
return createdFiles, nil
}
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
func loadAndCollectCerts(
chains []CertChain,
outputs Outputs,
expiryDuration time.Duration,
) ([]*x509.Certificate, []certWithPath, error) {
var singleFileCerts []*x509.Certificate
var individualCerts []certWithPath
for _, chain := range chains {
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
if cerr != nil {
return nil, nil, cerr
}
if len(s) > 0 {
singleFileCerts = append(singleFileCerts, s...)
}
if len(i) > 0 {
individualCerts = append(individualCerts, i...)
}
}
return singleFileCerts, individualCerts, nil
}
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
func collectFromChain(
chain CertChain,
outputs Outputs,
expiryDuration time.Duration,
) (
[]*x509.Certificate,
[]certWithPath,
error,
) {
var single []*x509.Certificate
var indiv []certWithPath
// Load root certificate
rootCert, rerr := certlib.LoadCertificate(chain.Root)
if rerr != nil {
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
}
// Check expiry for root
checkExpiry(chain.Root, rootCert, expiryDuration)
// Add root to collections if needed
if outputs.IncludeSingle {
single = append(single, rootCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
}
// Load and validate intermediates
for _, intPath := range chain.Intermediates {
intCert, lerr := certlib.LoadCertificate(intPath)
if lerr != nil {
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
}
// Validate that intermediate is signed by root
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
return nil, nil, fmt.Errorf(
"intermediate %s is not properly signed by root %s: %w",
intPath,
chain.Root,
sigErr,
)
}
// Check expiry for intermediate
checkExpiry(intPath, intCert, expiryDuration)
// Add intermediate to collections if needed
if outputs.IncludeSingle {
single = append(single, intCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
}
}
return single, indiv, nil
}
// prepareArchiveFiles prepares all files to be included in archives.
func prepareArchiveFiles(
singleFileCerts []*x509.Certificate,
individualCerts []certWithPath,
outputs Outputs,
encoding string,
) ([]fileEntry, error) {
var archiveFiles []fileEntry
// Handle a single bundle file
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
if err != nil {
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
}
archiveFiles = append(archiveFiles, files...)
}
// Handle individual files
if outputs.IncludeIndividual {
for _, cp := range individualCerts {
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
if err != nil {
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
}
archiveFiles = append(archiveFiles, files...)
}
}
// Generate manifest if requested
if outputs.Manifest {
manifestContent := generateManifest(archiveFiles)
archiveFiles = append(archiveFiles, fileEntry{
name: "MANIFEST",
content: manifestContent,
})
}
return archiveFiles, nil
}
// createArchiveFiles creates archive files in the specified formats.
func createArchiveFiles(groupName string, formats []string, archiveFiles []fileEntry) ([]string, error) {
createdFiles := make([]string, 0, len(formats))
for _, format := range formats {
ext, ok := formatExtensions[format]
if !ok {
return nil, fmt.Errorf("unsupported format: %s", format)
}
archivePath := filepath.Join(outputDir, groupName+ext)
switch format {
case "zip":
if err := createZipArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create zip archive: %w", err)
}
case "tgz":
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
}
default:
return nil, fmt.Errorf("unsupported format: %s", format)
}
createdFiles = append(createdFiles, archivePath)
}
return createdFiles, nil
}
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
now := time.Now()
expiryThreshold := now.Add(expiryDuration)
if cert.NotAfter.Before(expiryThreshold) {
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
if daysUntilExpiry < 0 {
fmt.Fprintf(
os.Stderr,
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
path,
-daysUntilExpiry,
)
} else {
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
}
}
}
type fileEntry struct {
name string
content []byte
}
type certWithPath struct {
cert *x509.Certificate
path string
}
// encodeCertsToFiles converts certificates to file entries based on encoding type
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
func encodeCertsToFiles(
certs []*x509.Certificate,
baseName string,
encoding string,
isSingle bool,
) ([]fileEntry, error) {
var files []fileEntry
switch encoding {
case "pem":
pemContent := encodeCertsToPEM(certs)
files = append(files, fileEntry{
name: baseName + ".pem",
content: pemContent,
})
case "der":
if isSingle {
// For single file in DER, concatenate all cert DER bytes
var derContent []byte
for _, cert := range certs {
derContent = append(derContent, cert.Raw...)
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: derContent,
})
} else if len(certs) > 0 {
// Individual DER file (should only have one cert)
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
case "both":
// Add PEM version
pemContent := encodeCertsToPEM(certs)
files = append(files, fileEntry{
name: baseName + ".pem",
content: pemContent,
})
// Add DER version
if isSingle {
var derContent []byte
for _, cert := range certs {
derContent = append(derContent, cert.Raw...)
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: derContent,
})
} else if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
default:
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
}
return files, nil
}
// encodeCertsToPEM encodes certificates to PEM format.
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
var pemContent []byte
for _, cert := range certs {
pemBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
}
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
}
return pemContent
}
func generateManifest(files []fileEntry) []byte {
var manifest strings.Builder
for _, file := range files {
if file.name == "MANIFEST" {
continue
}
hash := sha256.Sum256(file.content)
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
}
return []byte(manifest.String())
}
// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
func closeWithErr(baseErr error, closers ...io.Closer) error {
for _, c := range closers {
if c == nil {
continue
}
if cerr := c.Close(); cerr != nil {
baseErr = errors.Join(baseErr, cerr)
}
}
return baseErr
}
func createZipArchive(path string, files []fileEntry) error {
f, zerr := os.Create(path)
if zerr != nil {
return zerr
}
w := zip.NewWriter(f)
for _, file := range files {
fw, werr := w.Create(file.name)
if werr != nil {
return closeWithErr(werr, w, f)
}
if _, werr = fw.Write(file.content); werr != nil {
return closeWithErr(werr, w, f)
}
}
// Check errors on close operations
if cerr := w.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func createTarGzArchive(path string, files []fileEntry) error {
f, terr := os.Create(path)
if terr != nil {
return terr
}
gw := gzip.NewWriter(f)
tw := tar.NewWriter(gw)
for _, file := range files {
hdr := &tar.Header{
Name: file.name,
Mode: 0644,
Size: int64(len(file.content)),
}
if herr := tw.WriteHeader(hdr); herr != nil {
return closeWithErr(herr, tw, gw, f)
}
if _, werr := tw.Write(file.content); werr != nil {
return closeWithErr(werr, tw, gw, f)
}
}
// Check errors on close operations in the correct order
if cerr := tw.Close(); cerr != nil {
_ = gw.Close()
_ = f.Close()
return cerr
}
if cerr := gw.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func generateHashFile(path string, files []string) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
for _, file := range files {
data, rerr := os.ReadFile(file)
if rerr != nil {
return rerr
}
hash := sha256.Sum256(data)
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
}
return nil
}

View File

@@ -1,197 +0,0 @@
This project is an exploration into the utility of Jetbrains' Junie
to write smaller but tedious programs.
Task: build a certificate bundling tool in cmd/cert-bundler. It
creates archives of certificates chains.
A YAML file for this looks something like:
``` yaml
config:
hashes: bundle.sha256
expiry: 1y
chains:
core_certs:
certs:
- root: roots/core-ca.pem
intermediates:
- int/cca1.pem
- int/cca2.pem
- int/cca3.pem
- root: roots/ssh-ca.pem
intermediates:
- ssh/ssh_dmz1.pem
- ssh/ssh_internal.pem
outputs:
include_single: true
include_individual: true
manifest: true
formats:
- zip
- tgz
```
Some requirements:
1. First, all the certificates should be loaded.
2. For each root, each of the individual intermediates should be
checked to make sure they are properly signed by the root CA.
3. The program should optionally take an expiration period (defaulting
to one year), specified in config.expiry, and if any certificate
is within that expiration period, a warning should be printed.
4. If outputs.include_single is true, all certificates under chains
should be concatenated into a single file.
5. If outputs.include_individual is true, all certificates under
chains should be included at the root level (e.g. int/cca2.pem
would be cca2.pem in the archive).
6. If bundle.manifest is true, a "MANIFEST" file is created with
SHA256 sums of each file included in the archive.
7. For each of the formats, create an archive file in the output
directory (specified with `-o`) with that format.
- If zip is included, create a .zip file.
- If tgz is included, create a .tar.gz file with default compression
levels.
- All archive files should include any generated files (single
and/or individual) in the top-level directory.
8. In the output directory, create a file with the same name as
config.hashes that contains the SHA256 sum of all files created.
-----
The outputs.include_single and outputs.include_individual describe
what should go in the final archive. If both are specified, the output
archive should include both a single bundle.pem and each individual
certificate, for example.
-----
As it stands, given the following `bundle.yaml`:
``` yaml
config:
hashes: bundle.sha256
expiry: 1y
chains:
core_certs:
certs:
- root: pems/gts-r1.pem
intermediates:
- pems/goog-wr2.pem
outputs:
include_single: true
include_individual: true
manifest: true
formats:
- zip
- tgz
- root: pems/isrg-root-x1.pem
intermediates:
- pems/le-e7.pem
outputs:
include_single: true
include_individual: false
manifest: true
formats:
- zip
- tgz
google_certs:
certs:
- root: pems/gts-r1.pem
intermediates:
- pems/goog-wr2.pem
outputs:
include_single: true
include_individual: false
manifest: true
formats:
- tgz
lets_encrypt:
certs:
- root: pems/isrg-root-x1.pem
intermediates:
- pems/le-e7.pem
outputs:
include_single: false
include_individual: true
manifest: false
formats:
- zip
```
The program outputs the following files:
- bundle.sha256
- core_certs_0.tgz (contains individual certs)
- core_certs_0.zip (contains individual certs)
- core_certs_1.tgz (contains core_certs.pem)
- core_certs_1.zip (contains core_certs.pem)
- google_certs_0.tgz
- lets_encrypt_0.zip
It should output
- bundle.sha256
- core_certs.tgz
- core_certs.zip
- google_certs.tgz
- lets_encrypt.zip
core_certs.* should contain `bundle.pem` and all the individual
certs. There should be no _$n$ variants of archives.
-----
Add an additional field to outputs: encoding. It should accept one of
`der`, `pem`, or `both`. If `der`, certificates should be output as a
`.crt` file containing a DER-encoded certificate. If `pem`, certificates
should be output as a `.pem` file containing a PEM-encoded certificate.
If both, both the `.crt` and `.pem` certificate should be included.
For example, given the previous config, if `encoding` is der, the
google_certs.tgz archive should contain
- bundle.crt
- MANIFEST
Or with lets_encrypt.zip:
- isrg-root-x1.crt
- le-e7.crt
However, if `encoding` is pem, the lets_encrypt.zip archive should contain:
- isrg-root-x1.pem
- le-e7.pem
And if `encoding` is both, the lets_encrypt.zip archive should contain:
- isrg-root-x1.crt
- isrg-root-x1.pem
- le-e7.crt
- le-e7.pem
-----
The tgz format should output a `.tar.gz` file instead of a `.tgz` file.
-----
Move the format extensions to a global variable.
-----
Write a README.txt with a description of the bundle.yaml format.
Additionally, update the help text for the program (e.g. with `-h`)
to provide the same detailed information.
-----
It may be easier to embed the README.txt in the program on build.
-----
For the archive (tar.gz and zip) writers, make sure errors are
checked at the end, and don't just defer the close operations.

View File

@@ -2,6 +2,19 @@ config:
hashes: bundle.sha256
expiry: 1y
chains:
weird:
certs:
- root: pems/gts-r1.pem
intermediates:
- pems/goog-wr2.pem
- root: pems/isrg-root-x1.pem
outputs:
include_single: true
include_individual: true
manifest: true
formats:
- zip
- tgz
core_certs:
certs:
- root: pems/gts-r1.pem

View File

@@ -1,4 +0,0 @@
5ed8bf9ed693045faa8a5cb0edc4a870052e56aef6291ce8b1604565affbc2a4 core_certs.zip
e59eddc590d2f7b790a87c5b56e81697088ab54be382c0e2c51b82034006d308 core_certs.tgz
51b9b63b1335118079e90700a3a5b847c363808e9116e576ca84f301bc433289 google_certs.tgz
3d1910ca8835c3ded1755a8c7d6c48083c2f3ff68b2bfbf932aaf27e29d0a232 lets_encrypt.zip

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -2,27 +2,151 @@
package main
import (
"bytes"
"context"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"flag"
"fmt"
"io"
"os"
"sort"
"strings"
"github.com/kr/text"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/lib"
)
// following two lifted from CFSSL, (replace-regexp "\(.+\): \(.+\),"
// "\2: \1,")
const (
sSHA256 = "SHA256"
sSHA512 = "SHA512"
)
var keyUsage = map[x509.KeyUsage]string{
x509.KeyUsageDigitalSignature: "digital signature",
x509.KeyUsageContentCommitment: "content commitment",
x509.KeyUsageKeyEncipherment: "key encipherment",
x509.KeyUsageKeyAgreement: "key agreement",
x509.KeyUsageDataEncipherment: "data encipherment",
x509.KeyUsageCertSign: "cert sign",
x509.KeyUsageCRLSign: "crl sign",
x509.KeyUsageEncipherOnly: "encipher only",
x509.KeyUsageDecipherOnly: "decipher only",
}
var extKeyUsages = map[x509.ExtKeyUsage]string{
x509.ExtKeyUsageAny: "any",
x509.ExtKeyUsageServerAuth: "server auth",
x509.ExtKeyUsageClientAuth: "client auth",
x509.ExtKeyUsageCodeSigning: "code signing",
x509.ExtKeyUsageEmailProtection: "s/mime",
x509.ExtKeyUsageIPSECEndSystem: "ipsec end system",
x509.ExtKeyUsageIPSECTunnel: "ipsec tunnel",
x509.ExtKeyUsageIPSECUser: "ipsec user",
x509.ExtKeyUsageTimeStamping: "timestamping",
x509.ExtKeyUsageOCSPSigning: "ocsp signing",
x509.ExtKeyUsageMicrosoftServerGatedCrypto: "microsoft sgc",
x509.ExtKeyUsageNetscapeServerGatedCrypto: "netscape sgc",
x509.ExtKeyUsageMicrosoftCommercialCodeSigning: "microsoft commercial code signing",
x509.ExtKeyUsageMicrosoftKernelCodeSigning: "microsoft kernel code signing",
}
func sigAlgoPK(a x509.SignatureAlgorithm) string {
switch a {
case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
return "RSA"
case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS:
return "RSA-PSS"
case x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
return "ECDSA"
case x509.DSAWithSHA1, x509.DSAWithSHA256:
return "DSA"
case x509.PureEd25519:
return "Ed25519"
case x509.UnknownSignatureAlgorithm:
return "unknown public key algorithm"
default:
return "unknown public key algorithm"
}
}
func sigAlgoHash(a x509.SignatureAlgorithm) string {
switch a {
case x509.MD2WithRSA:
return "MD2"
case x509.MD5WithRSA:
return "MD5"
case x509.SHA1WithRSA, x509.ECDSAWithSHA1, x509.DSAWithSHA1:
return "SHA1"
case x509.SHA256WithRSA, x509.ECDSAWithSHA256, x509.DSAWithSHA256:
return sSHA256
case x509.SHA256WithRSAPSS:
return sSHA256
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
return "SHA384"
case x509.SHA384WithRSAPSS:
return "SHA384"
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
return sSHA512
case x509.SHA512WithRSAPSS:
return sSHA512
case x509.PureEd25519:
return sSHA512
case x509.UnknownSignatureAlgorithm:
return "unknown hash algorithm"
default:
return "unknown hash algorithm"
}
}
const maxLine = 78
func makeIndent(n int) string {
s := " "
var sSb97 strings.Builder
for range n {
sSb97.WriteString(" ")
}
s += sSb97.String()
return s
}
func indentLen(n int) int {
return 4 + (8 * n)
}
// this isn't real efficient, but that's not a problem here.
func wrap(s string, indent int) string {
if indent > 3 {
indent = 3
}
wrapped := text.Wrap(s, maxLine)
lines := strings.SplitN(wrapped, "\n", 2)
if len(lines) == 1 {
return lines[0]
}
if (maxLine - indentLen(indent)) <= 0 {
panic("too much indentation")
}
rest := strings.Join(lines[1:], " ")
wrapped = text.Wrap(rest, maxLine-indentLen(indent))
return lines[0] + "\n" + text.Indent(wrapped, makeIndent(indent))
}
func dumpHex(in []byte) string {
return lib.HexEncode(in, lib.HexEncodeUpperColon)
}
func certPublic(cert *x509.Certificate) string {
switch pub := cert.PublicKey.(type) {
case *rsa.PublicKey:
@@ -116,7 +240,7 @@ func showBasicConstraints(cert *x509.Certificate) {
fmt.Fprint(os.Stdout, " (basic constraint failure)")
}
} else {
fmt.Fprint(os.Stdout, "is not a CA certificate")
fmt.Fprint(os.Stdout, ", is not a CA certificate")
if cert.KeyUsage&x509.KeyUsageKeyEncipherment != 0 {
fmt.Fprint(os.Stdout, " (key encipherment usage enabled!)")
}
@@ -220,122 +344,6 @@ func displayCert(cert *x509.Certificate) {
}
}
func displayAllCerts(in []byte, leafOnly bool) {
certs, err := certlib.ParseCertificatesPEM(in)
if err != nil {
certs, _, err = certlib.ParseCertificatesDER(in, "")
if err != nil {
_, _ = lib.Warn(err, "failed to parse certificates")
return
}
}
if len(certs) == 0 {
_, _ = lib.Warnx("no certificates found")
return
}
if leafOnly {
displayCert(certs[0])
return
}
for i := range certs {
displayCert(certs[i])
}
}
func displayAllCertsWeb(uri string, leafOnly bool) {
ci := getConnInfo(uri)
d := &tls.Dialer{Config: permissiveConfig()}
nc, err := d.DialContext(context.Background(), "tcp", ci.Addr)
if err != nil {
_, _ = lib.Warn(err, "couldn't connect to %s", ci.Addr)
return
}
conn, ok := nc.(*tls.Conn)
if !ok {
_, _ = lib.Warnx("invalid TLS connection (not a *tls.Conn)")
return
}
defer conn.Close()
state := conn.ConnectionState()
if err = conn.Close(); err != nil {
_, _ = lib.Warn(err, "couldn't close TLS connection")
}
d = &tls.Dialer{Config: verifyConfig(ci.Host)}
nc, err = d.DialContext(context.Background(), "tcp", ci.Addr)
if err == nil {
conn, ok = nc.(*tls.Conn)
if !ok {
_, _ = lib.Warnx("invalid TLS connection (not a *tls.Conn)")
return
}
err = conn.VerifyHostname(ci.Host)
if err == nil {
state = conn.ConnectionState()
}
conn.Close()
} else {
_, _ = lib.Warn(err, "TLS verification error with server name %s", ci.Host)
}
if len(state.PeerCertificates) == 0 {
_, _ = lib.Warnx("no certificates found")
return
}
if leafOnly {
displayCert(state.PeerCertificates[0])
return
}
if len(state.VerifiedChains) == 0 {
_, _ = lib.Warnx("no verified chains found; using peer chain")
for i := range state.PeerCertificates {
displayCert(state.PeerCertificates[i])
}
} else {
fmt.Fprintln(os.Stdout, "TLS chain verified successfully.")
for i := range state.VerifiedChains {
fmt.Fprintf(os.Stdout, "--- Verified certificate chain %d ---%s", i+1, "\n")
for j := range state.VerifiedChains[i] {
displayCert(state.VerifiedChains[i][j])
}
}
}
}
func shouldReadStdin(argc int, argv []string) bool {
if argc == 0 {
return true
}
if argc == 1 && argv[0] == "-" {
return true
}
return false
}
func readStdin(leafOnly bool) {
certs, err := io.ReadAll(os.Stdin)
if err != nil {
_, _ = lib.Warn(err, "couldn't read certificates from standard input")
os.Exit(1)
}
// This is needed for getting certs from JSON/jq.
certs = bytes.TrimSpace(certs)
certs = bytes.ReplaceAll(certs, []byte(`\n`), []byte{0xa})
certs = bytes.Trim(certs, `"`)
displayAllCerts(certs, leafOnly)
}
func main() {
var leafOnly bool
flag.BoolVar(&showHash, "d", false, "show hashes of raw DER contents")
@@ -343,23 +351,26 @@ func main() {
flag.BoolVar(&leafOnly, "l", false, "only show the leaf certificate")
flag.Parse()
if shouldReadStdin(flag.NArg(), flag.Args()) {
readStdin(leafOnly)
return
opts := &certlib.FetcherOpts{
SkipVerify: true,
Roots: nil,
}
for _, filename := range flag.Args() {
fmt.Fprintf(os.Stdout, "--%s ---%s", filename, "\n")
if strings.HasPrefix(filename, "https://") {
displayAllCertsWeb(filename, leafOnly)
} else {
in, err := os.ReadFile(filename)
if err != nil {
_, _ = lib.Warn(err, "couldn't read certificate")
continue
}
certs, err := certlib.GetCertificateChain(filename, opts)
if err != nil {
_, _ = lib.Warn(err, "couldn't read certificate")
continue
}
displayAllCerts(in, leafOnly)
if leafOnly {
displayCert(certs[0])
continue
}
for i := range certs {
displayCert(certs[i])
}
}
}

View File

@@ -1,189 +0,0 @@
package main
import (
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"strings"
"github.com/kr/text"
)
// following two lifted from CFSSL, (replace-regexp "\(.+\): \(.+\),"
// "\2: \1,")
const (
sSHA256 = "SHA256"
sSHA512 = "SHA512"
)
var keyUsage = map[x509.KeyUsage]string{
x509.KeyUsageDigitalSignature: "digital signature",
x509.KeyUsageContentCommitment: "content committment",
x509.KeyUsageKeyEncipherment: "key encipherment",
x509.KeyUsageKeyAgreement: "key agreement",
x509.KeyUsageDataEncipherment: "data encipherment",
x509.KeyUsageCertSign: "cert sign",
x509.KeyUsageCRLSign: "crl sign",
x509.KeyUsageEncipherOnly: "encipher only",
x509.KeyUsageDecipherOnly: "decipher only",
}
var extKeyUsages = map[x509.ExtKeyUsage]string{
x509.ExtKeyUsageAny: "any",
x509.ExtKeyUsageServerAuth: "server auth",
x509.ExtKeyUsageClientAuth: "client auth",
x509.ExtKeyUsageCodeSigning: "code signing",
x509.ExtKeyUsageEmailProtection: "s/mime",
x509.ExtKeyUsageIPSECEndSystem: "ipsec end system",
x509.ExtKeyUsageIPSECTunnel: "ipsec tunnel",
x509.ExtKeyUsageIPSECUser: "ipsec user",
x509.ExtKeyUsageTimeStamping: "timestamping",
x509.ExtKeyUsageOCSPSigning: "ocsp signing",
x509.ExtKeyUsageMicrosoftServerGatedCrypto: "microsoft sgc",
x509.ExtKeyUsageNetscapeServerGatedCrypto: "netscape sgc",
x509.ExtKeyUsageMicrosoftCommercialCodeSigning: "microsoft commercial code signing",
x509.ExtKeyUsageMicrosoftKernelCodeSigning: "microsoft kernel code signing",
}
func sigAlgoPK(a x509.SignatureAlgorithm) string {
switch a {
case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA:
return "RSA"
case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS:
return "RSA-PSS"
case x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
return "ECDSA"
case x509.DSAWithSHA1, x509.DSAWithSHA256:
return "DSA"
case x509.PureEd25519:
return "Ed25519"
case x509.UnknownSignatureAlgorithm:
return "unknown public key algorithm"
default:
return "unknown public key algorithm"
}
}
func sigAlgoHash(a x509.SignatureAlgorithm) string {
switch a {
case x509.MD2WithRSA:
return "MD2"
case x509.MD5WithRSA:
return "MD5"
case x509.SHA1WithRSA, x509.ECDSAWithSHA1, x509.DSAWithSHA1:
return "SHA1"
case x509.SHA256WithRSA, x509.ECDSAWithSHA256, x509.DSAWithSHA256:
return sSHA256
case x509.SHA256WithRSAPSS:
return sSHA256
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
return "SHA384"
case x509.SHA384WithRSAPSS:
return "SHA384"
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
return sSHA512
case x509.SHA512WithRSAPSS:
return sSHA512
case x509.PureEd25519:
return sSHA512
case x509.UnknownSignatureAlgorithm:
return "unknown hash algorithm"
default:
return "unknown hash algorithm"
}
}
const maxLine = 78
func makeIndent(n int) string {
s := " "
var sSb97 strings.Builder
for range n {
sSb97.WriteString(" ")
}
s += sSb97.String()
return s
}
func indentLen(n int) int {
return 4 + (8 * n)
}
// this isn't real efficient, but that's not a problem here.
func wrap(s string, indent int) string {
if indent > 3 {
indent = 3
}
wrapped := text.Wrap(s, maxLine)
lines := strings.SplitN(wrapped, "\n", 2)
if len(lines) == 1 {
return lines[0]
}
if (maxLine - indentLen(indent)) <= 0 {
panic("too much indentation")
}
rest := strings.Join(lines[1:], " ")
wrapped = text.Wrap(rest, maxLine-indentLen(indent))
return lines[0] + "\n" + text.Indent(wrapped, makeIndent(indent))
}
func dumpHex(in []byte) string {
var s string
var sSb130 strings.Builder
for i := range in {
sSb130.WriteString(fmt.Sprintf("%02X:", in[i]))
}
s += sSb130.String()
return strings.Trim(s, ":")
}
// permissiveConfig returns a maximally-accepting TLS configuration;
// the purpose is to look at the cert, not verify the security properties
// of the connection.
func permissiveConfig() *tls.Config {
return &tls.Config{
InsecureSkipVerify: true,
} // #nosec G402
}
// verifyConfig returns a config that will verify the connection.
func verifyConfig(hostname string) *tls.Config {
return &tls.Config{
ServerName: hostname,
} // #nosec G402
}
type connInfo struct {
// The original URI provided.
URI string
// The hostname of the server.
Host string
// The port to connect on.
Port string
// The address to connect to.
Addr string
}
func getConnInfo(uri string) *connInfo {
ci := &connInfo{URI: uri}
ci.Host = uri[len("https://"):]
host, port, err := net.SplitHostPort(ci.Host)
if err != nil {
ci.Port = "443"
} else {
ci.Host = host
ci.Port = port
}
ci.Addr = net.JoinHostPort(ci.Host, ci.Port)
return ci
}

View File

@@ -75,18 +75,15 @@ func checkCert(cert *x509.Certificate) {
}
func main() {
opts := &certlib.FetcherOpts{}
flag.BoolVar(&opts.SkipVerify, "k", false, "skip server verification")
flag.BoolVar(&warnOnly, "q", false, "only warn about expiring certs")
flag.DurationVar(&leeway, "t", leeway, "warn if certificates are closer than this to expiring")
flag.Parse()
for _, file := range flag.Args() {
in, err := os.ReadFile(file)
if err != nil {
_, _ = lib.Warn(err, "failed to read file")
continue
}
certs, err := certlib.ParseCertificatesPEM(in)
certs, err := certlib.GetCertificateChain(file, opts)
if err != nil {
_, _ = lib.Warn(err, "while parsing certificates")
continue

53
cmd/certser/main.go Normal file
View File

@@ -0,0 +1,53 @@
package main
import (
"crypto/x509"
"flag"
"fmt"
"strings"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)
const displayInt lib.HexEncodeMode = iota
func parseDisplayMode(mode string) lib.HexEncodeMode {
mode = strings.ToLower(mode)
if mode == "int" {
return displayInt
}
return lib.ParseHexEncodeMode(mode)
}
func serialString(cert *x509.Certificate, mode lib.HexEncodeMode) string {
if mode == displayInt {
return cert.SerialNumber.String()
}
return lib.HexEncode(cert.SerialNumber.Bytes(), mode)
}
func main() {
opts := &certlib.FetcherOpts{}
displayAs := flag.String("d", "int", "display mode (int, hex, uhex)")
showExpiry := flag.Bool("e", false, "show expiry date")
flag.BoolVar(&opts.SkipVerify, "k", false, "skip server verification")
flag.Parse()
displayMode := parseDisplayMode(*displayAs)
for _, arg := range flag.Args() {
cert, err := certlib.GetCertificate(arg, opts)
die.If(err)
fmt.Printf("%s: %s", arg, serialString(cert, displayMode))
if *showExpiry {
fmt.Printf(" (%s)", cert.NotAfter.Format("2006-01-02"))
}
fmt.Println()
}
}

View File

@@ -29,9 +29,9 @@ func printRevocation(cert *x509.Certificate) {
}
type appConfig struct {
caFile, intFile string
forceIntermediateBundle bool
revexp, verbose bool
caFile, intFile string
forceIntermediateBundle bool
revexp, skipVerify, verbose bool
}
func parseFlags() appConfig {
@@ -40,6 +40,7 @@ func parseFlags() appConfig {
flag.StringVar(&cfg.intFile, "i", "", "intermediate `bundle`")
flag.BoolVar(&cfg.forceIntermediateBundle, "f", false,
"force the use of the intermediate bundle, ignoring any intermediates bundled with certificate")
flag.BoolVar(&cfg.skipVerify, "k", false, "skip CA verification")
flag.BoolVar(&cfg.revexp, "r", false, "print revocation and expiry information")
flag.BoolVar(&cfg.verbose, "v", false, "verbose")
flag.Parse()
@@ -102,12 +103,17 @@ func run(cfg appConfig) error {
fmt.Fprintf(os.Stderr, "Usage: %s [-ca bundle] [-i bundle] cert", lib.ProgName())
}
fileData, err := os.ReadFile(flag.Arg(0))
combinedPool, err := certlib.LoadFullCertPool(cfg.caFile, cfg.intFile)
if err != nil {
return err
return fmt.Errorf("failed to build combined pool: %w", err)
}
chain, err := certlib.ParseCertificatesPEM(fileData)
opts := &certlib.FetcherOpts{
Roots: combinedPool,
SkipVerify: cfg.skipVerify,
}
chain, err := certlib.GetCertificateChain(flag.Arg(0), opts)
if err != nil {
return err
}

View File

@@ -13,7 +13,6 @@ import (
"fmt"
"io"
"os"
"strings"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/die"
@@ -32,10 +31,10 @@ Usage:
ski [-hm] files...
Flags:
-d Hex encoding mode.
-h Print this help message.
-m All SKIs should match; as soon as an SKI mismatch is found,
it is reported.
`)
}
@@ -145,15 +144,8 @@ func parseCSR(data []byte) ([]byte, string) {
return public, kt
}
func dumpHex(in []byte) string {
var s string
var sSb153 strings.Builder
for i := range in {
sSb153.WriteString(fmt.Sprintf("%02X:", in[i]))
}
s += sSb153.String()
return strings.Trim(s, ":")
func dumpHex(in []byte, mode lib.HexEncodeMode) string {
return lib.HexEncode(in, mode)
}
type subjectPublicKeyInfo struct {
@@ -163,10 +155,14 @@ type subjectPublicKeyInfo struct {
func main() {
var help, shouldMatch bool
var displayModeString string
flag.StringVar(&displayModeString, "d", "lower", "hex encoding mode")
flag.BoolVar(&help, "h", false, "print a help message and exit")
flag.BoolVar(&shouldMatch, "m", false, "all SKIs should match")
flag.Parse()
displayMode := lib.ParseHexEncodeMode(displayModeString)
if help {
usage(os.Stdout)
os.Exit(0)
@@ -184,7 +180,7 @@ func main() {
}
pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes) // #nosec G401 this is the standard
pubHashString := dumpHex(pubHash[:])
pubHashString := dumpHex(pubHash[:], displayMode)
if ski == "" {
ski = pubHashString
}

2
go.mod
View File

@@ -15,6 +15,7 @@ require (
github.com/benbjohnson/clock v1.3.5
github.com/davecgh/go-spew v1.1.1
github.com/google/certificate-transparency-go v1.0.21
rsc.io/qr v0.2.0
)
require (
@@ -22,5 +23,4 @@ require (
github.com/kr/pretty v0.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
rsc.io/qr v0.2.0 // indirect
)

View File

@@ -2,9 +2,11 @@
package lib
import (
"encoding/hex"
"fmt"
"os"
"path/filepath"
"strings"
"time"
)
@@ -109,3 +111,130 @@ func Duration(d time.Duration) string {
s += fmt.Sprintf("%dh%s", hours, d)
return s
}
type HexEncodeMode uint8
const (
// HexEncodeLower prints the bytes as lowercase hexadecimal.
HexEncodeLower HexEncodeMode = iota + 1
// HexEncodeUpper prints the bytes as uppercase hexadecimal.
HexEncodeUpper
// HexEncodeLowerColon prints the bytes as lowercase hexadecimal
// with colons between each pair of bytes.
HexEncodeLowerColon
// HexEncodeUpperColon prints the bytes as uppercase hexadecimal
// with colons between each pair of bytes.
HexEncodeUpperColon
// HexEncodeBytes prints the string as a sequence of []byte.
HexEncodeBytes
)
func (m HexEncodeMode) String() string {
switch m {
case HexEncodeLower:
return "lower"
case HexEncodeUpper:
return "upper"
case HexEncodeLowerColon:
return "lcolon"
case HexEncodeUpperColon:
return "ucolon"
case HexEncodeBytes:
return "bytes"
default:
panic("invalid hex encode mode")
}
}
func ParseHexEncodeMode(s string) HexEncodeMode {
switch strings.ToLower(s) {
case "lower":
return HexEncodeLower
case "upper":
return HexEncodeUpper
case "lcolon":
return HexEncodeLowerColon
case "ucolon":
return HexEncodeUpperColon
case "bytes":
return HexEncodeBytes
}
panic("invalid hex encode mode")
}
func hexColons(s string) string {
if len(s)%2 != 0 {
fmt.Fprintf(os.Stderr, "hex string: %s\n", s)
fmt.Fprintf(os.Stderr, "hex length: %d\n", len(s))
panic("invalid hex string length")
}
n := len(s)
if n <= 2 {
return s
}
pairCount := n / 2
if n%2 != 0 {
pairCount++
}
var b strings.Builder
b.Grow(n + pairCount - 1)
for i := 0; i < n; i += 2 {
b.WriteByte(s[i])
if i+1 < n {
b.WriteByte(s[i+1])
}
if i+2 < n {
b.WriteByte(':')
}
}
return b.String()
}
func hexEncode(b []byte) string {
s := hex.EncodeToString(b)
if len(s)%2 != 0 {
s = "0" + s
}
return s
}
func bytesAsByteSliceString(buf []byte) string {
sb := &strings.Builder{}
sb.WriteString("[]byte{")
for i := range buf {
fmt.Fprintf(sb, "0x%02x, ", buf[i])
}
sb.WriteString("}")
return sb.String()
}
// HexEncode encodes the given bytes as a hexadecimal string.
func HexEncode(b []byte, mode HexEncodeMode) string {
str := hexEncode(b)
switch mode {
case HexEncodeLower:
return str
case HexEncodeUpper:
return strings.ToUpper(str)
case HexEncodeLowerColon:
return hexColons(str)
case HexEncodeUpperColon:
return strings.ToUpper(hexColons(str))
case HexEncodeBytes:
return bytesAsByteSliceString(b)
default:
panic("invalid hex encode mode")
}
}

79
lib/lib_test.go Normal file
View File

@@ -0,0 +1,79 @@
package lib_test
import (
"testing"
"git.wntrmute.dev/kyle/goutils/lib"
)
func TestHexEncode_LowerUpper(t *testing.T) {
b := []byte{0x0f, 0xa1, 0x00, 0xff}
gotLower := lib.HexEncode(b, lib.HexEncodeLower)
if gotLower != "0fa100ff" {
t.Fatalf("lib.HexEncode lower: expected %q, got %q", "0fa100ff", gotLower)
}
gotUpper := lib.HexEncode(b, lib.HexEncodeUpper)
if gotUpper != "0FA100FF" {
t.Fatalf("lib.HexEncode upper: expected %q, got %q", "0FA100FF", gotUpper)
}
}
func TestHexEncode_ColonModes(t *testing.T) {
// Includes leading zero nibble and a zero byte to verify padding and separators
b := []byte{0x0f, 0xa1, 0x00, 0xff}
gotLColon := lib.HexEncode(b, lib.HexEncodeLowerColon)
if gotLColon != "0f:a1:00:ff" {
t.Fatalf("lib.HexEncode colon lower: expected %q, got %q", "0f:a1:00:ff", gotLColon)
}
gotUColon := lib.HexEncode(b, lib.HexEncodeUpperColon)
if gotUColon != "0F:A1:00:FF" {
t.Fatalf("lib.HexEncode colon upper: expected %q, got %q", "0F:A1:00:FF", gotUColon)
}
}
func TestHexEncode_EmptyInput(t *testing.T) {
var b []byte
if got := lib.HexEncode(b, lib.HexEncodeLower); got != "" {
t.Fatalf("empty lower: expected empty string, got %q", got)
}
if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "" {
t.Fatalf("empty upper: expected empty string, got %q", got)
}
if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "" {
t.Fatalf("empty colon lower: expected empty string, got %q", got)
}
if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "" {
t.Fatalf("empty colon upper: expected empty string, got %q", got)
}
}
func TestHexEncode_SingleByte(t *testing.T) {
b := []byte{0x0f}
if got := lib.HexEncode(b, lib.HexEncodeLower); got != "0f" {
t.Fatalf("single byte lower: expected %q, got %q", "0f", got)
}
if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "0F" {
t.Fatalf("single byte upper: expected %q, got %q", "0F", got)
}
// For a single byte, colon modes should not introduce separators
if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "0f" {
t.Fatalf("single byte colon lower: expected %q, got %q", "0f", got)
}
if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "0F" {
t.Fatalf("single byte colon upper: expected %q, got %q", "0F", got)
}
}
func TestHexEncode_InvalidModePanics(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Fatalf("expected panic for invalid mode, but function returned normally")
}
}()
// 0 is not a valid lib.HexEncodeMode (valid modes start at 1)
_ = lib.HexEncode([]byte{0x01}, lib.HexEncodeMode(0))
}