Compare commits

...

10 Commits

SHA1 Message Date
0a71661901 CHANGELOG: bump to v1.13.2. 2025-11-17 15:50:51 -08:00
804f53d27d Refactor bundling into separate package. 2025-11-17 15:08:10 -08:00
cfb80355bb Update CHANGELOG for v1.13.1. 2025-11-17 10:08:05 -08:00
77160395a0 Cleaning up a few things. 2025-11-17 10:07:03 -08:00
37d5e04421 Adding Dockerfile 2025-11-17 09:03:43 -08:00
dc54eeacbc Remove cert bundles generated in testdata. 2025-11-17 08:36:31 -08:00
e2a3081ce5 cmd: add certser command. 2025-11-17 07:18:46 -08:00
3149d958f4 cmd: add certser 2025-11-17 06:55:20 -08:00
f296344acf twofactor: linting fixes 2025-11-16 21:51:38 -08:00
3fb2d88a3f go get rsc.io/qr 2025-11-16 20:44:13 -08:00
30 changed files with 1133 additions and 932 deletions

.gitignore (vendored): 1 line changed

@@ -1 +1,2 @@
.idea
cmd/cert-bundler/testdata/pkg/*

View File

@@ -12,6 +12,12 @@
version: "2"
output:
sort-order:
- file
- linter
- severity
issues:
# Maximum count of issues with the same text.
# Set to 0 to disable.
@@ -384,6 +390,9 @@ linters:
- 3
- 4
- 8
- 24
- 30
- 365
nakedret:
# Make an issue if func has more lines of code than this setting, and it has naked returns.
@@ -454,6 +463,8 @@ linters:
- -QF1008
# We often explicitly enable old/deprecated ciphers for research.
- -SA1019
# Covered by revive.
- -ST1003
usetesting:
# Enable/disable `os.TempDir()` detections.
@@ -472,6 +483,8 @@ linters:
rules:
- path: 'ahash/ahash.go'
linters: [ staticcheck, gosec ]
- path: 'twofactor/.*.go'
linters: [ exhaustive, mnd, revive ]
- path: 'backoff/backoff_test.go'
linters: [ testpackage ]
- path: 'dbg/dbg_test.go'

View File

@@ -1,5 +1,40 @@
CHANGELOG
v1.13.2 - 2025-11-17
Add:
- certlib/bundler: refactor certificate bundling from cmd/cert-bundler
into a separate package.
Changed:
- cmd/cert-bundler: refactor to use bundler package, and update Dockerfile.
v1.13.1 - 2025-11-17
Add:
- Dockerfile for cert-bundler.
v1.13.0 - 2025-11-16
Add:
- cmd/certser: print serial numbers for certificates.
- lib/HexEncode: add a new hex encode function handling multiple output
formats, including with and without colons.
v1.12.4 - 2025-11-16
Changed:
- Linting fixes for twofactor that were previously masked.
v1.12.3 erroneously tagged and pushed
v1.12.2 - 2025-11-16
Changed:
- add rsc.io/qr dependency for twofactor.
v1.12.1 - 2025-11-16
Changed:

certlib/bundler/bundler.go (new file, 668 lines)

@@ -0,0 +1,668 @@
package bundler
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"gopkg.in/yaml.v2"
"git.wntrmute.dev/kyle/goutils/certlib"
)
const defaultFileMode = 0644
// Config represents the top-level YAML configuration.
type Config struct {
Config struct {
Hashes string `yaml:"hashes"`
Expiry string `yaml:"expiry"`
} `yaml:"config"`
Chains map[string]ChainGroup `yaml:"chains"`
}
// ChainGroup represents a named group of certificate chains.
type ChainGroup struct {
Certs []CertChain `yaml:"certs"`
Outputs Outputs `yaml:"outputs"`
}
// CertChain represents a root certificate and its intermediates.
type CertChain struct {
Root string `yaml:"root"`
Intermediates []string `yaml:"intermediates"`
}
// Outputs defines output format options.
type Outputs struct {
IncludeSingle bool `yaml:"include_single"`
IncludeIndividual bool `yaml:"include_individual"`
Manifest bool `yaml:"manifest"`
Formats []string `yaml:"formats"`
Encoding string `yaml:"encoding"`
}
var formatExtensions = map[string]string{
"zip": ".zip",
"tgz": ".tar.gz",
}
// Run performs the bundling operation given a config file path and an output directory.
func Run(configFile string, outputDir string) error {
if configFile == "" {
return errors.New("configuration file required")
}
cfg, err := loadConfig(configFile)
if err != nil {
return fmt.Errorf("loading config: %w", err)
}
expiryDuration := 365 * 24 * time.Hour
if cfg.Config.Expiry != "" {
expiryDuration, err = parseDuration(cfg.Config.Expiry)
if err != nil {
return fmt.Errorf("parsing expiry: %w", err)
}
}
if err = os.MkdirAll(outputDir, 0750); err != nil {
return fmt.Errorf("creating output directory: %w", err)
}
totalFormats := 0
for _, group := range cfg.Chains {
totalFormats += len(group.Outputs.Formats)
}
createdFiles := make([]string, 0, totalFormats)
for groupName, group := range cfg.Chains {
files, perr := processChainGroup(groupName, group, expiryDuration, outputDir)
if perr != nil {
return fmt.Errorf("processing chain group %s: %w", groupName, perr)
}
createdFiles = append(createdFiles, files...)
}
if cfg.Config.Hashes != "" {
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
return fmt.Errorf("generating hash file: %w", gerr)
}
}
return nil
}
func loadConfig(path string) (*Config, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var cfg Config
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
return nil, uerr
}
return &cfg, nil
}
func parseDuration(s string) (time.Duration, error) {
// Support simple formats like "1y", "6m", "30d"
if len(s) < 2 {
return 0, fmt.Errorf("invalid duration format: %s", s)
}
unit := s[len(s)-1]
value := s[:len(s)-1]
var multiplier time.Duration
switch unit {
case 'y', 'Y':
multiplier = 365 * 24 * time.Hour
case 'm', 'M':
multiplier = 30 * 24 * time.Hour
case 'd', 'D':
multiplier = 24 * time.Hour
default:
return time.ParseDuration(s)
}
var num int
_, err := fmt.Sscanf(value, "%d", &num)
if err != nil {
return 0, fmt.Errorf("invalid duration value: %s", s)
}
return time.Duration(num) * multiplier, nil
}
func processChainGroup(
groupName string,
group ChainGroup,
expiryDuration time.Duration,
outputDir string,
) ([]string, error) {
// Default encoding to "pem" if not specified
encoding := group.Outputs.Encoding
if encoding == "" {
encoding = "pem"
}
// Collect certificates from all chains in the group
singleFileCerts, individualCerts, sourcePaths, err := loadAndCollectCerts(
group.Certs,
group.Outputs,
expiryDuration,
)
if err != nil {
return nil, err
}
// Prepare files for inclusion in archives
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, sourcePaths, group.Outputs, encoding)
if err != nil {
return nil, err
}
// Create archives for the entire group
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles, outputDir)
if err != nil {
return nil, err
}
return createdFiles, nil
}
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
func loadAndCollectCerts(
chains []CertChain,
outputs Outputs,
expiryDuration time.Duration,
) ([]*x509.Certificate, []certWithPath, []string, error) {
var singleFileCerts []*x509.Certificate
var individualCerts []certWithPath
var sourcePaths []string
for _, chain := range chains {
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
if cerr != nil {
return nil, nil, nil, cerr
}
if len(s) > 0 {
singleFileCerts = append(singleFileCerts, s...)
}
if len(i) > 0 {
individualCerts = append(individualCerts, i...)
}
// Record source paths for timestamp preservation
// Only append when loading succeeded
sourcePaths = append(sourcePaths, chain.Root)
sourcePaths = append(sourcePaths, chain.Intermediates...)
}
return singleFileCerts, individualCerts, sourcePaths, nil
}
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
func collectFromChain(
chain CertChain,
outputs Outputs,
expiryDuration time.Duration,
) (
[]*x509.Certificate,
[]certWithPath,
error,
) {
var single []*x509.Certificate
var indiv []certWithPath
// Load root certificate
rootCert, rerr := certlib.LoadCertificate(chain.Root)
if rerr != nil {
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
}
// Check expiry for root
checkExpiry(chain.Root, rootCert, expiryDuration)
// Add root to collections if needed
if outputs.IncludeSingle {
single = append(single, rootCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
}
// Load and validate intermediates
for _, intPath := range chain.Intermediates {
intCert, lerr := certlib.LoadCertificate(intPath)
if lerr != nil {
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
}
// Validate that intermediate is signed by root
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
return nil, nil, fmt.Errorf(
"intermediate %s is not properly signed by root %s: %w",
intPath,
chain.Root,
sigErr,
)
}
// Check expiry for intermediate
checkExpiry(intPath, intCert, expiryDuration)
// Add intermediate to collections if needed
if outputs.IncludeSingle {
single = append(single, intCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
}
}
return single, indiv, nil
}
// prepareArchiveFiles prepares all files to be included in archives.
func prepareArchiveFiles(
singleFileCerts []*x509.Certificate,
individualCerts []certWithPath,
sourcePaths []string,
outputs Outputs,
encoding string,
) ([]fileEntry, error) {
var archiveFiles []fileEntry
// Track used filenames to avoid collisions inside archives
usedNames := make(map[string]int)
// Handle a single bundle file
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
bundleTime := maxModTime(sourcePaths)
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
if err != nil {
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
}
for i := range files {
files[i].name = makeUniqueName(files[i].name, usedNames)
files[i].modTime = bundleTime
// Best-effort: we do not have a portable birth/creation time.
// Use the same timestamp for created time to track deterministically.
files[i].createTime = bundleTime
}
archiveFiles = append(archiveFiles, files...)
}
// Handle individual files
if outputs.IncludeIndividual {
for _, cp := range individualCerts {
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
if err != nil {
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
}
mt := fileModTime(cp.path)
for i := range files {
files[i].name = makeUniqueName(files[i].name, usedNames)
files[i].modTime = mt
files[i].createTime = mt
}
archiveFiles = append(archiveFiles, files...)
}
}
// Generate manifest if requested
if outputs.Manifest {
manifestContent := generateManifest(archiveFiles)
manifestName := makeUniqueName("MANIFEST", usedNames)
mt := maxModTime(sourcePaths)
archiveFiles = append(archiveFiles, fileEntry{
name: manifestName,
content: manifestContent,
modTime: mt,
createTime: mt,
})
}
return archiveFiles, nil
}
// createArchiveFiles creates archive files in the specified formats.
func createArchiveFiles(
groupName string,
formats []string,
archiveFiles []fileEntry,
outputDir string,
) ([]string, error) {
createdFiles := make([]string, 0, len(formats))
for _, format := range formats {
ext, ok := formatExtensions[format]
if !ok {
return nil, fmt.Errorf("unsupported format: %s", format)
}
archivePath := filepath.Join(outputDir, groupName+ext)
switch format {
case "zip":
if err := createZipArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create zip archive: %w", err)
}
case "tgz":
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
}
default:
return nil, fmt.Errorf("unsupported format: %s", format)
}
createdFiles = append(createdFiles, archivePath)
}
return createdFiles, nil
}
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
now := time.Now()
expiryThreshold := now.Add(expiryDuration)
if cert.NotAfter.Before(expiryThreshold) {
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
if daysUntilExpiry < 0 {
fmt.Fprintf(
os.Stderr,
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
path,
-daysUntilExpiry,
)
} else {
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
}
}
}
type fileEntry struct {
name string
content []byte
modTime time.Time
createTime time.Time
}
type certWithPath struct {
cert *x509.Certificate
path string
}
// encodeCertsToFiles converts certificates to file entries based on encoding type
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
func encodeCertsToFiles(
certs []*x509.Certificate,
baseName string,
encoding string,
isSingle bool,
) ([]fileEntry, error) {
var files []fileEntry
switch encoding {
case "pem":
pemContent := encodeCertsToPEM(certs)
files = append(files, fileEntry{
name: baseName + ".pem",
content: pemContent,
})
case "der":
if isSingle {
// For single file in DER, concatenate all cert DER bytes
var derContent []byte
for _, cert := range certs {
derContent = append(derContent, cert.Raw...)
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: derContent,
})
} else if len(certs) > 0 {
// Individual DER file (should only have one cert)
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
case "both":
// Add PEM version
pemContent := encodeCertsToPEM(certs)
files = append(files, fileEntry{
name: baseName + ".pem",
content: pemContent,
})
// Add DER version
if isSingle {
var derContent []byte
for _, cert := range certs {
derContent = append(derContent, cert.Raw...)
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: derContent,
})
} else if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
default:
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
}
return files, nil
}
// encodeCertsToPEM encodes certificates to PEM format.
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
var pemContent []byte
for _, cert := range certs {
pemBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
}
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
}
return pemContent
}
func generateManifest(files []fileEntry) []byte {
var manifest strings.Builder
for _, file := range files {
if file.name == "MANIFEST" {
continue
}
hash := sha256.Sum256(file.content)
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
}
return []byte(manifest.String())
}
// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
func closeWithErr(baseErr error, closers ...io.Closer) error {
for _, c := range closers {
if c == nil {
continue
}
if cerr := c.Close(); cerr != nil {
baseErr = errors.Join(baseErr, cerr)
}
}
return baseErr
}
func createZipArchive(path string, files []fileEntry) error {
f, zerr := os.Create(path)
if zerr != nil {
return zerr
}
w := zip.NewWriter(f)
for _, file := range files {
hdr := &zip.FileHeader{
Name: file.name,
Method: zip.Deflate,
}
if !file.modTime.IsZero() {
hdr.SetModTime(file.modTime)
}
fw, werr := w.CreateHeader(hdr)
if werr != nil {
return closeWithErr(werr, w, f)
}
if _, werr = fw.Write(file.content); werr != nil {
return closeWithErr(werr, w, f)
}
}
// Check errors on close operations
if cerr := w.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func createTarGzArchive(path string, files []fileEntry) error {
f, terr := os.Create(path)
if terr != nil {
return terr
}
gw := gzip.NewWriter(f)
tw := tar.NewWriter(gw)
for _, file := range files {
hdr := &tar.Header{
Name: file.name,
Uid: 0,
Gid: 0,
Mode: defaultFileMode,
Size: int64(len(file.content)),
ModTime: func() time.Time {
if file.modTime.IsZero() {
return time.Now()
}
return file.modTime
}(),
}
// Set additional times if supported
hdr.AccessTime = hdr.ModTime
if !file.createTime.IsZero() {
hdr.ChangeTime = file.createTime
} else {
hdr.ChangeTime = hdr.ModTime
}
if herr := tw.WriteHeader(hdr); herr != nil {
return closeWithErr(herr, tw, gw, f)
}
if _, werr := tw.Write(file.content); werr != nil {
return closeWithErr(werr, tw, gw, f)
}
}
// Check errors on close operations in the correct order
if cerr := tw.Close(); cerr != nil {
_ = gw.Close()
_ = f.Close()
return cerr
}
if cerr := gw.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func generateHashFile(path string, files []string) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
for _, file := range files {
data, rerr := os.ReadFile(file)
if rerr != nil {
return rerr
}
hash := sha256.Sum256(data)
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
}
return nil
}
// makeUniqueName ensures that each file name within the archive is unique by appending
// an incremental numeric suffix before the extension when collisions occur.
// Example: "root.pem" -> "root-2.pem", "root-3.pem", etc.
func makeUniqueName(name string, used map[string]int) string {
// If unused, mark and return as-is
if _, ok := used[name]; !ok {
used[name] = 1
return name
}
ext := filepath.Ext(name)
base := strings.TrimSuffix(name, ext)
// Track a counter per base+ext key
key := base + ext
counter := max(used[key], 1)
for {
counter++
candidate := fmt.Sprintf("%s-%d%s", base, counter, ext)
if _, exists := used[candidate]; !exists {
used[key] = counter
used[candidate] = 1
return candidate
}
}
}
// fileModTime returns the file's modification time, or time.Now() if stat fails.
func fileModTime(path string) time.Time {
fi, err := os.Stat(path)
if err != nil {
return time.Now()
}
return fi.ModTime()
}
// maxModTime returns the latest modification time across provided paths.
// If the list is empty or stats fail, returns time.Now().
func maxModTime(paths []string) time.Time {
var zero time.Time
maxTime := zero
for _, p := range paths {
fi, err := os.Stat(p)
if err != nil {
continue
}
mt := fi.ModTime()
if maxTime.IsZero() || mt.After(maxTime) {
maxTime = mt
}
}
if maxTime.IsZero() {
return time.Now()
}
return maxTime
}
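
Editor's note: for orientation, a minimal sketch of driving the new package through its exported entry point, Run (mirroring what cmd/cert-bundler now does). The config path and output directory below are placeholders.

``` go
package main

import (
	"fmt"
	"os"

	"git.wntrmute.dev/kyle/goutils/certlib/bundler"
)

func main() {
	// "bundle.yaml" and "bundle" are placeholder paths. Run loads the YAML
	// config, writes the requested archives, and generates the hash file
	// if config.hashes is set.
	if err := bundler.Run("bundle.yaml", "bundle"); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}
```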

View File

@@ -0,0 +1,28 @@
# Build and runtime image for cert-bundler
# Usage (from repo root or cmd/cert-bundler directory):
# docker build -t cert-bundler:latest -f cmd/cert-bundler/Dockerfile .
# docker run --rm -v "$PWD":/work cert-bundler:latest
# This expects a /work/bundle.yaml file in the mounted directory and
# will write generated bundles to /work/bundle.
# Build stage
FROM golang:1.24.3-alpine AS build
WORKDIR /src
# Install the pinned cert-bundler release; the module download happens in this cached layer
RUN go install git.wntrmute.dev/kyle/goutils/cmd/cert-bundler@v1.13.2 && \
mv /go/bin/cert-bundler /usr/local/bin/cert-bundler
# Runtime stage (kept as golang:alpine per requirement)
FROM golang:1.24.3-alpine
# Create a work directory that users will typically mount into
WORKDIR /work
VOLUME ["/work"]
# Copy the built binary from the builder stage
COPY --from=build /usr/local/bin/cert-bundler /usr/local/bin/cert-bundler
# Default command: read bundle.yaml from current directory and output to ./bundle
ENTRYPOINT ["/usr/local/bin/cert-bundler"]
CMD ["-c", "/work/bundle.yaml", "-o", "/work/bundle"]

View File

@@ -1,66 +1,19 @@
package main
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"crypto/sha256"
"crypto/x509"
_ "embed"
"encoding/pem"
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"git.wntrmute.dev/kyle/goutils/certlib"
"gopkg.in/yaml.v2"
"git.wntrmute.dev/kyle/goutils/certlib/bundler"
)
// Config represents the top-level YAML configuration.
type Config struct {
Config struct {
Hashes string `yaml:"hashes"`
Expiry string `yaml:"expiry"`
} `yaml:"config"`
Chains map[string]ChainGroup `yaml:"chains"`
}
// ChainGroup represents a named group of certificate chains.
type ChainGroup struct {
Certs []CertChain `yaml:"certs"`
Outputs Outputs `yaml:"outputs"`
}
// CertChain represents a root certificate and its intermediates.
type CertChain struct {
Root string `yaml:"root"`
Intermediates []string `yaml:"intermediates"`
}
// Outputs defines output format options.
type Outputs struct {
IncludeSingle bool `yaml:"include_single"`
IncludeIndividual bool `yaml:"include_individual"`
Manifest bool `yaml:"manifest"`
Formats []string `yaml:"formats"`
Encoding string `yaml:"encoding"`
}
var (
configFile string
outputDir string
)
var formatExtensions = map[string]string{
"zip": ".zip",
"tgz": ".tar.gz",
}
//go:embed README.txt
var readmeContent string
@@ -79,497 +32,10 @@ func main() {
os.Exit(1)
}
// Load and parse configuration
cfg, err := loadConfig(configFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Error loading config: %v\n", err)
if err := bundler.Run(configFile, outputDir); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Parse expiry duration (default 1 year)
expiryDuration := 365 * 24 * time.Hour
if cfg.Config.Expiry != "" {
expiryDuration, err = parseDuration(cfg.Config.Expiry)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing expiry: %v\n", err)
os.Exit(1)
}
}
// Create output directory if it doesn't exist
err = os.MkdirAll(outputDir, 0750)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
os.Exit(1)
}
// Process each chain group
// Pre-allocate createdFiles based on total number of formats across all groups
totalFormats := 0
for _, group := range cfg.Chains {
totalFormats += len(group.Outputs.Formats)
}
createdFiles := make([]string, 0, totalFormats)
for groupName, group := range cfg.Chains {
files, perr := processChainGroup(groupName, group, expiryDuration)
if perr != nil {
fmt.Fprintf(os.Stderr, "Error processing chain group %s: %v\n", groupName, perr)
os.Exit(1)
}
createdFiles = append(createdFiles, files...)
}
// Generate hash file for all created archives
if cfg.Config.Hashes != "" {
hashFile := filepath.Join(outputDir, cfg.Config.Hashes)
if gerr := generateHashFile(hashFile, createdFiles); gerr != nil {
fmt.Fprintf(os.Stderr, "Error generating hash file: %v\n", gerr)
os.Exit(1)
}
}
fmt.Println("Certificate bundling completed successfully")
}
func loadConfig(path string) (*Config, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var cfg Config
if uerr := yaml.Unmarshal(data, &cfg); uerr != nil {
return nil, uerr
}
return &cfg, nil
}
func parseDuration(s string) (time.Duration, error) {
// Support simple formats like "1y", "6m", "30d"
if len(s) < 2 {
return 0, fmt.Errorf("invalid duration format: %s", s)
}
unit := s[len(s)-1]
value := s[:len(s)-1]
var multiplier time.Duration
switch unit {
case 'y', 'Y':
multiplier = 365 * 24 * time.Hour
case 'm', 'M':
multiplier = 30 * 24 * time.Hour
case 'd', 'D':
multiplier = 24 * time.Hour
default:
return time.ParseDuration(s)
}
var num int
_, err := fmt.Sscanf(value, "%d", &num)
if err != nil {
return 0, fmt.Errorf("invalid duration value: %s", s)
}
return time.Duration(num) * multiplier, nil
}
func processChainGroup(groupName string, group ChainGroup, expiryDuration time.Duration) ([]string, error) {
// Default encoding to "pem" if not specified
encoding := group.Outputs.Encoding
if encoding == "" {
encoding = "pem"
}
// Collect certificates from all chains in the group
singleFileCerts, individualCerts, err := loadAndCollectCerts(group.Certs, group.Outputs, expiryDuration)
if err != nil {
return nil, err
}
// Prepare files for inclusion in archives
archiveFiles, err := prepareArchiveFiles(singleFileCerts, individualCerts, group.Outputs, encoding)
if err != nil {
return nil, err
}
// Create archives for the entire group
createdFiles, err := createArchiveFiles(groupName, group.Outputs.Formats, archiveFiles)
if err != nil {
return nil, err
}
return createdFiles, nil
}
// loadAndCollectCerts loads all certificates from chains and collects them for processing.
func loadAndCollectCerts(
chains []CertChain,
outputs Outputs,
expiryDuration time.Duration,
) ([]*x509.Certificate, []certWithPath, error) {
var singleFileCerts []*x509.Certificate
var individualCerts []certWithPath
for _, chain := range chains {
s, i, cerr := collectFromChain(chain, outputs, expiryDuration)
if cerr != nil {
return nil, nil, cerr
}
if len(s) > 0 {
singleFileCerts = append(singleFileCerts, s...)
}
if len(i) > 0 {
individualCerts = append(individualCerts, i...)
}
}
return singleFileCerts, individualCerts, nil
}
// collectFromChain loads a single chain, performs checks, and returns the certs to include.
func collectFromChain(
chain CertChain,
outputs Outputs,
expiryDuration time.Duration,
) (
[]*x509.Certificate,
[]certWithPath,
error,
) {
var single []*x509.Certificate
var indiv []certWithPath
// Load root certificate
rootCert, rerr := certlib.LoadCertificate(chain.Root)
if rerr != nil {
return nil, nil, fmt.Errorf("failed to load root certificate %s: %w", chain.Root, rerr)
}
// Check expiry for root
checkExpiry(chain.Root, rootCert, expiryDuration)
// Add root to collections if needed
if outputs.IncludeSingle {
single = append(single, rootCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: rootCert, path: chain.Root})
}
// Load and validate intermediates
for _, intPath := range chain.Intermediates {
intCert, lerr := certlib.LoadCertificate(intPath)
if lerr != nil {
return nil, nil, fmt.Errorf("failed to load intermediate certificate %s: %w", intPath, lerr)
}
// Validate that intermediate is signed by root
if sigErr := intCert.CheckSignatureFrom(rootCert); sigErr != nil {
return nil, nil, fmt.Errorf(
"intermediate %s is not properly signed by root %s: %w",
intPath,
chain.Root,
sigErr,
)
}
// Check expiry for intermediate
checkExpiry(intPath, intCert, expiryDuration)
// Add intermediate to collections if needed
if outputs.IncludeSingle {
single = append(single, intCert)
}
if outputs.IncludeIndividual {
indiv = append(indiv, certWithPath{cert: intCert, path: intPath})
}
}
return single, indiv, nil
}
// prepareArchiveFiles prepares all files to be included in archives.
func prepareArchiveFiles(
singleFileCerts []*x509.Certificate,
individualCerts []certWithPath,
outputs Outputs,
encoding string,
) ([]fileEntry, error) {
var archiveFiles []fileEntry
// Handle a single bundle file
if outputs.IncludeSingle && len(singleFileCerts) > 0 {
files, err := encodeCertsToFiles(singleFileCerts, "bundle", encoding, true)
if err != nil {
return nil, fmt.Errorf("failed to encode single bundle: %w", err)
}
archiveFiles = append(archiveFiles, files...)
}
// Handle individual files
if outputs.IncludeIndividual {
for _, cp := range individualCerts {
baseName := strings.TrimSuffix(filepath.Base(cp.path), filepath.Ext(cp.path))
files, err := encodeCertsToFiles([]*x509.Certificate{cp.cert}, baseName, encoding, false)
if err != nil {
return nil, fmt.Errorf("failed to encode individual cert %s: %w", cp.path, err)
}
archiveFiles = append(archiveFiles, files...)
}
}
// Generate manifest if requested
if outputs.Manifest {
manifestContent := generateManifest(archiveFiles)
archiveFiles = append(archiveFiles, fileEntry{
name: "MANIFEST",
content: manifestContent,
})
}
return archiveFiles, nil
}
// createArchiveFiles creates archive files in the specified formats.
func createArchiveFiles(groupName string, formats []string, archiveFiles []fileEntry) ([]string, error) {
createdFiles := make([]string, 0, len(formats))
for _, format := range formats {
ext, ok := formatExtensions[format]
if !ok {
return nil, fmt.Errorf("unsupported format: %s", format)
}
archivePath := filepath.Join(outputDir, groupName+ext)
switch format {
case "zip":
if err := createZipArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create zip archive: %w", err)
}
case "tgz":
if err := createTarGzArchive(archivePath, archiveFiles); err != nil {
return nil, fmt.Errorf("failed to create tar.gz archive: %w", err)
}
default:
return nil, fmt.Errorf("unsupported format: %s", format)
}
createdFiles = append(createdFiles, archivePath)
}
return createdFiles, nil
}
func checkExpiry(path string, cert *x509.Certificate, expiryDuration time.Duration) {
now := time.Now()
expiryThreshold := now.Add(expiryDuration)
if cert.NotAfter.Before(expiryThreshold) {
daysUntilExpiry := int(cert.NotAfter.Sub(now).Hours() / 24)
if daysUntilExpiry < 0 {
fmt.Fprintf(
os.Stderr,
"WARNING: Certificate %s has EXPIRED (expired %d days ago)\n",
path,
-daysUntilExpiry,
)
} else {
fmt.Fprintf(os.Stderr, "WARNING: Certificate %s will expire in %d days (on %s)\n", path, daysUntilExpiry, cert.NotAfter.Format("2006-01-02"))
}
}
}
type fileEntry struct {
name string
content []byte
}
type certWithPath struct {
cert *x509.Certificate
path string
}
// encodeCertsToFiles converts certificates to file entries based on encoding type
// If isSingle is true, certs are concatenated into a single file; otherwise one cert per file.
func encodeCertsToFiles(
certs []*x509.Certificate,
baseName string,
encoding string,
isSingle bool,
) ([]fileEntry, error) {
var files []fileEntry
switch encoding {
case "pem":
pemContent := encodeCertsToPEM(certs)
files = append(files, fileEntry{
name: baseName + ".pem",
content: pemContent,
})
case "der":
if isSingle {
// For single file in DER, concatenate all cert DER bytes
var derContent []byte
for _, cert := range certs {
derContent = append(derContent, cert.Raw...)
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: derContent,
})
} else if len(certs) > 0 {
// Individual DER file (should only have one cert)
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
case "both":
// Add PEM version
pemContent := encodeCertsToPEM(certs)
files = append(files, fileEntry{
name: baseName + ".pem",
content: pemContent,
})
// Add DER version
if isSingle {
var derContent []byte
for _, cert := range certs {
derContent = append(derContent, cert.Raw...)
}
files = append(files, fileEntry{
name: baseName + ".crt",
content: derContent,
})
} else if len(certs) > 0 {
files = append(files, fileEntry{
name: baseName + ".crt",
content: certs[0].Raw,
})
}
default:
return nil, fmt.Errorf("unsupported encoding: %s (must be 'pem', 'der', or 'both')", encoding)
}
return files, nil
}
// encodeCertsToPEM encodes certificates to PEM format.
func encodeCertsToPEM(certs []*x509.Certificate) []byte {
var pemContent []byte
for _, cert := range certs {
pemBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
}
pemContent = append(pemContent, pem.EncodeToMemory(pemBlock)...)
}
return pemContent
}
func generateManifest(files []fileEntry) []byte {
var manifest strings.Builder
for _, file := range files {
if file.name == "MANIFEST" {
continue
}
hash := sha256.Sum256(file.content)
manifest.WriteString(fmt.Sprintf("%x %s\n", hash, file.name))
}
return []byte(manifest.String())
}
// closeWithErr attempts to close all provided closers, joining any close errors with baseErr.
func closeWithErr(baseErr error, closers ...io.Closer) error {
for _, c := range closers {
if c == nil {
continue
}
if cerr := c.Close(); cerr != nil {
baseErr = errors.Join(baseErr, cerr)
}
}
return baseErr
}
func createZipArchive(path string, files []fileEntry) error {
f, zerr := os.Create(path)
if zerr != nil {
return zerr
}
w := zip.NewWriter(f)
for _, file := range files {
fw, werr := w.Create(file.name)
if werr != nil {
return closeWithErr(werr, w, f)
}
if _, werr = fw.Write(file.content); werr != nil {
return closeWithErr(werr, w, f)
}
}
// Check errors on close operations
if cerr := w.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func createTarGzArchive(path string, files []fileEntry) error {
f, terr := os.Create(path)
if terr != nil {
return terr
}
gw := gzip.NewWriter(f)
tw := tar.NewWriter(gw)
for _, file := range files {
hdr := &tar.Header{
Name: file.name,
Mode: 0644,
Size: int64(len(file.content)),
}
if herr := tw.WriteHeader(hdr); herr != nil {
return closeWithErr(herr, tw, gw, f)
}
if _, werr := tw.Write(file.content); werr != nil {
return closeWithErr(werr, tw, gw, f)
}
}
// Check errors on close operations in the correct order
if cerr := tw.Close(); cerr != nil {
_ = gw.Close()
_ = f.Close()
return cerr
}
if cerr := gw.Close(); cerr != nil {
_ = f.Close()
return cerr
}
return f.Close()
}
func generateHashFile(path string, files []string) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
for _, file := range files {
data, rerr := os.ReadFile(file)
if rerr != nil {
return rerr
}
hash := sha256.Sum256(data)
fmt.Fprintf(f, "%x %s\n", hash, filepath.Base(file))
}
return nil
}

View File

@@ -1,197 +0,0 @@
This project is an exploration into the utility of JetBrains' Junie
to write smaller but tedious programs.
Task: build a certificate bundling tool in cmd/cert-bundler. It
creates archives of certificate chains.
A YAML file for this looks something like:
``` yaml
config:
hashes: bundle.sha256
expiry: 1y
chains:
core_certs:
certs:
- root: roots/core-ca.pem
intermediates:
- int/cca1.pem
- int/cca2.pem
- int/cca3.pem
- root: roots/ssh-ca.pem
intermediates:
- ssh/ssh_dmz1.pem
- ssh/ssh_internal.pem
outputs:
include_single: true
include_individual: true
manifest: true
formats:
- zip
- tgz
```
Some requirements:
1. First, all the certificates should be loaded.
2. For each root, each of the individual intermediates should be
checked to make sure they are properly signed by the root CA.
3. The program should optionally take an expiration period (defaulting
to one year), specified in config.expiry; if any certificate expires
within that period, a warning should be printed.
4. If outputs.include_single is true, all certificates under chains
should be concatenated into a single file.
5. If outputs.include_individual is true, all certificates under
chains should be included at the root level (e.g. int/cca2.pem
would be cca2.pem in the archive).
6. If bundle.manifest is true, a "MANIFEST" file is created with
SHA256 sums of each file included in the archive.
7. For each of the formats, create an archive file in the output
directory (specified with `-o`) with that format.
- If zip is included, create a .zip file.
- If tgz is included, create a .tar.gz file with default compression
levels.
- All archive files should include any generated files (single
and/or individual) in the top-level directory.
8. In the output directory, create a file with the same name as
config.hashes that contains the SHA256 sum of all files created
(a short sketch of this step follows the list).
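
Editor's note: a minimal sketch of the hash-file step from item 8, under the assumption that the list of created archive paths is already known; writeHashFile is a hypothetical helper that mirrors what generateHashFile in the new bundler package does.

``` go
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
	"path/filepath"
)

// writeHashFile is a hypothetical stand-in for the hash-file step: one
// "sha256 filename" line per created archive, written to config.hashes.
func writeHashFile(path string, created []string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	for _, file := range created {
		data, rerr := os.ReadFile(file)
		if rerr != nil {
			return rerr
		}
		sum := sha256.Sum256(data)
		fmt.Fprintf(f, "%x %s\n", sum, filepath.Base(file))
	}
	return nil
}

func main() {
	// Archive names are placeholders for whatever the formats list produced.
	created := []string{"bundle/core_certs.zip", "bundle/core_certs.tar.gz"}
	if err := writeHashFile("bundle/bundle.sha256", created); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```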
-----
The outputs.include_single and outputs.include_individual describe
what should go in the final archive. If both are specified, the output
archive should include both a single bundle.pem and each individual
certificate, for example.
-----
As it stands, given the following `bundle.yaml`:
``` yaml
config:
hashes: bundle.sha256
expiry: 1y
chains:
core_certs:
certs:
- root: pems/gts-r1.pem
intermediates:
- pems/goog-wr2.pem
outputs:
include_single: true
include_individual: true
manifest: true
formats:
- zip
- tgz
- root: pems/isrg-root-x1.pem
intermediates:
- pems/le-e7.pem
outputs:
include_single: true
include_individual: false
manifest: true
formats:
- zip
- tgz
google_certs:
certs:
- root: pems/gts-r1.pem
intermediates:
- pems/goog-wr2.pem
outputs:
include_single: true
include_individual: false
manifest: true
formats:
- tgz
lets_encrypt:
certs:
- root: pems/isrg-root-x1.pem
intermediates:
- pems/le-e7.pem
outputs:
include_single: false
include_individual: true
manifest: false
formats:
- zip
```
The program outputs the following files:
- bundle.sha256
- core_certs_0.tgz (contains individual certs)
- core_certs_0.zip (contains individual certs)
- core_certs_1.tgz (contains core_certs.pem)
- core_certs_1.zip (contains core_certs.pem)
- google_certs_0.tgz
- lets_encrypt_0.zip
It should output
- bundle.sha256
- core_certs.tgz
- core_certs.zip
- google_certs.tgz
- lets_encrypt.zip
core_certs.* should contain `bundle.pem` and all the individual
certs. There should be no _N-suffixed variants of the archives.
-----
Add an additional field to outputs: encoding. It should accept one of
`der`, `pem`, or `both`. If `der`, certificates should be output as a
`.crt` file containing a DER-encoded certificate. If `pem`, certificates
should be output as a `.pem` file containing a PEM-encoded certificate.
If both, both the `.crt` and `.pem` certificate should be included.
For example, given the previous config, if `encoding` is der, the
google_certs.tgz archive should contain
- bundle.crt
- MANIFEST
Or with lets_encrypt.zip:
- isrg-root-x1.crt
- le-e7.crt
However, if `encoding` is pem, the lets_encrypt.zip archive should contain:
- isrg-root-x1.pem
- le-e7.pem
And if `encoding` is both, the lets_encrypt.zip archive should contain:
- isrg-root-x1.crt
- isrg-root-x1.pem
- le-e7.crt
- le-e7.pem
-----
The tgz format should output a `.tar.gz` file instead of a `.tgz` file.
-----
Move the format extensions to a global variable.
-----
Write a README.txt with a description of the bundle.yaml format.
Additionally, update the help text for the program (e.g. with `-h`)
to provide the same detailed information.
-----
It may be easier to embed the README.txt in the program on build.
-----
For the archive (tar.gz and zip) writers, make sure errors are
checked at the end, and don't just defer the close operations.
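
Editor's note: a minimal sketch of the pattern that last note asks for, with explicit, ordered Close calls whose errors are checked rather than deferred. writeTarGz is a hypothetical helper; the refactored archive writers above implement the same idea via closeWithErr.

``` go
package main

import (
	"archive/tar"
	"compress/gzip"
	"log"
	"os"
)

// writeTarGz shows the close-error pattern: the writers are closed
// explicitly, innermost first, and every Close error is checked.
func writeTarGz(path string, files map[string][]byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	gw := gzip.NewWriter(f)
	tw := tar.NewWriter(gw)
	for name, data := range files {
		hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(data))}
		if err := tw.WriteHeader(hdr); err != nil {
			gw.Close()
			f.Close()
			return err
		}
		if _, err := tw.Write(data); err != nil {
			gw.Close()
			f.Close()
			return err
		}
	}
	if err := tw.Close(); err != nil { // flushes the tar footer
		gw.Close()
		f.Close()
		return err
	}
	if err := gw.Close(); err != nil { // flushes the gzip stream
		f.Close()
		return err
	}
	return f.Close()
}

func main() {
	payload := map[string][]byte{"hello.txt": []byte("hi\n")}
	if err := writeTarGz("example.tar.gz", payload); err != nil {
		log.Fatal(err)
	}
}
```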

View File

@@ -2,6 +2,19 @@ config:
hashes: bundle.sha256
expiry: 1y
chains:
weird:
certs:
- root: pems/gts-r1.pem
intermediates:
- pems/goog-wr2.pem
- root: pems/isrg-root-x1.pem
outputs:
include_single: true
include_individual: true
manifest: true
formats:
- zip
- tgz
core_certs:
certs:
- root: pems/gts-r1.pem

View File

@@ -1,4 +0,0 @@
5ed8bf9ed693045faa8a5cb0edc4a870052e56aef6291ce8b1604565affbc2a4 core_certs.zip
e59eddc590d2f7b790a87c5b56e81697088ab54be382c0e2c51b82034006d308 core_certs.tgz
51b9b63b1335118079e90700a3a5b847c363808e9116e576ca84f301bc433289 google_certs.tgz
3d1910ca8835c3ded1755a8c7d6c48083c2f3ff68b2bfbf932aaf27e29d0a232 lets_encrypt.zip

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

cmd/certser/main.go (new file, 51 lines)

@@ -0,0 +1,51 @@
package main
import (
"crypto/x509"
"flag"
"fmt"
"strings"
"git.wntrmute.dev/kyle/goutils/certlib"
"git.wntrmute.dev/kyle/goutils/die"
"git.wntrmute.dev/kyle/goutils/lib"
)
const displayInt lib.HexEncodeMode = iota
func parseDisplayMode(mode string) lib.HexEncodeMode {
mode = strings.ToLower(mode)
if mode == "int" {
return displayInt
}
return lib.ParseHexEncodeMode(mode)
}
func serialString(cert *x509.Certificate, mode lib.HexEncodeMode) string {
if mode == displayInt {
return cert.SerialNumber.String()
}
return lib.HexEncode(cert.SerialNumber.Bytes(), mode)
}
func main() {
displayAs := flag.String("d", "int", "display mode (int, hex, uhex)")
showExpiry := flag.Bool("e", false, "show expiry date")
flag.Parse()
displayMode := parseDisplayMode(*displayAs)
for _, arg := range flag.Args() {
cert, err := certlib.LoadCertificate(arg)
die.If(err)
fmt.Printf("%s: %s", arg, serialString(cert, displayMode))
if *showExpiry {
fmt.Printf(" (%s)", cert.NotAfter.Format("2006-01-02"))
}
fmt.Println()
}
}
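
Editor's note: for reference, a small sketch of the serial formats the display modes above produce, using an illustrative serial value rather than a real certificate; the expected strings match the lib tests added later in this diff.

``` go
package main

import (
	"fmt"
	"math/big"

	"git.wntrmute.dev/kyle/goutils/lib"
)

func main() {
	// Illustrative serial number; certser reads it from a certificate file.
	serial := new(big.Int).SetBytes([]byte{0x0f, 0xa1, 0x00, 0xff})

	fmt.Println(serial.String())                                        // decimal, the displayInt branch
	fmt.Println(lib.HexEncode(serial.Bytes(), lib.HexEncodeLower))      // 0fa100ff
	fmt.Println(lib.HexEncode(serial.Bytes(), lib.HexEncodeUpperColon)) // 0F:A1:00:FF
}
```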

go.mod (1 line changed)

@@ -22,4 +22,5 @@ require (
github.com/kr/pretty v0.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
rsc.io/qr v0.2.0 // indirect
)

go.sum (2 lines changed)

@@ -44,3 +44,5 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=

View File

@@ -2,9 +2,11 @@
package lib
import (
"encoding/hex"
"fmt"
"os"
"path/filepath"
"strings"
"time"
)
@@ -109,3 +111,111 @@ func Duration(d time.Duration) string {
s += fmt.Sprintf("%dh%s", hours, d)
return s
}
type HexEncodeMode uint8
const (
// HexEncodeLower prints the bytes as lowercase hexadecimal.
HexEncodeLower HexEncodeMode = iota + 1
// HexEncodeUpper prints the bytes as uppercase hexadecimal.
HexEncodeUpper
// HexEncodeLowerColon prints the bytes as lowercase hexadecimal
// with colons between each pair of bytes.
HexEncodeLowerColon
// HexEncodeUpperColon prints the bytes as uppercase hexadecimal
// with colons between each pair of bytes.
HexEncodeUpperColon
)
func (m HexEncodeMode) String() string {
switch m {
case HexEncodeLower:
return "lower"
case HexEncodeUpper:
return "upper"
case HexEncodeLowerColon:
return "lcolon"
case HexEncodeUpperColon:
return "ucolon"
default:
panic("invalid hex encode mode")
}
}
func ParseHexEncodeMode(s string) HexEncodeMode {
switch strings.ToLower(s) {
case "lower":
return HexEncodeLower
case "upper":
return HexEncodeUpper
case "lcolon":
return HexEncodeLowerColon
case "ucolon":
return HexEncodeUpperColon
}
panic("invalid hex encode mode")
}
func hexColons(s string) string {
if len(s)%2 != 0 {
fmt.Fprintf(os.Stderr, "hex string: %s\n", s)
fmt.Fprintf(os.Stderr, "hex length: %d\n", len(s))
panic("invalid hex string length")
}
n := len(s)
if n <= 2 {
return s
}
pairCount := n / 2
if n%2 != 0 {
pairCount++
}
var b strings.Builder
b.Grow(n + pairCount - 1)
for i := 0; i < n; i += 2 {
b.WriteByte(s[i])
if i+1 < n {
b.WriteByte(s[i+1])
}
if i+2 < n {
b.WriteByte(':')
}
}
return b.String()
}
func hexEncode(b []byte) string {
s := hex.EncodeToString(b)
if len(s)%2 != 0 {
s = "0" + s
}
return s
}
// HexEncode encodes the given bytes as a hexadecimal string.
func HexEncode(b []byte, mode HexEncodeMode) string {
str := hexEncode(b)
switch mode {
case HexEncodeLower:
return str
case HexEncodeUpper:
return strings.ToUpper(str)
case HexEncodeLowerColon:
return hexColons(str)
case HexEncodeUpperColon:
return strings.ToUpper(hexColons(str))
default:
panic("invalid hex encode mode")
}
}

lib/lib_test.go (new file, 79 lines)

@@ -0,0 +1,79 @@
package lib_test
import (
"testing"
"git.wntrmute.dev/kyle/goutils/lib"
)
func TestHexEncode_LowerUpper(t *testing.T) {
b := []byte{0x0f, 0xa1, 0x00, 0xff}
gotLower := lib.HexEncode(b, lib.HexEncodeLower)
if gotLower != "0fa100ff" {
t.Fatalf("lib.HexEncode lower: expected %q, got %q", "0fa100ff", gotLower)
}
gotUpper := lib.HexEncode(b, lib.HexEncodeUpper)
if gotUpper != "0FA100FF" {
t.Fatalf("lib.HexEncode upper: expected %q, got %q", "0FA100FF", gotUpper)
}
}
func TestHexEncode_ColonModes(t *testing.T) {
// Includes leading zero nibble and a zero byte to verify padding and separators
b := []byte{0x0f, 0xa1, 0x00, 0xff}
gotLColon := lib.HexEncode(b, lib.HexEncodeLowerColon)
if gotLColon != "0f:a1:00:ff" {
t.Fatalf("lib.HexEncode colon lower: expected %q, got %q", "0f:a1:00:ff", gotLColon)
}
gotUColon := lib.HexEncode(b, lib.HexEncodeUpperColon)
if gotUColon != "0F:A1:00:FF" {
t.Fatalf("lib.HexEncode colon upper: expected %q, got %q", "0F:A1:00:FF", gotUColon)
}
}
func TestHexEncode_EmptyInput(t *testing.T) {
var b []byte
if got := lib.HexEncode(b, lib.HexEncodeLower); got != "" {
t.Fatalf("empty lower: expected empty string, got %q", got)
}
if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "" {
t.Fatalf("empty upper: expected empty string, got %q", got)
}
if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "" {
t.Fatalf("empty colon lower: expected empty string, got %q", got)
}
if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "" {
t.Fatalf("empty colon upper: expected empty string, got %q", got)
}
}
func TestHexEncode_SingleByte(t *testing.T) {
b := []byte{0x0f}
if got := lib.HexEncode(b, lib.HexEncodeLower); got != "0f" {
t.Fatalf("single byte lower: expected %q, got %q", "0f", got)
}
if got := lib.HexEncode(b, lib.HexEncodeUpper); got != "0F" {
t.Fatalf("single byte upper: expected %q, got %q", "0F", got)
}
// For a single byte, colon modes should not introduce separators
if got := lib.HexEncode(b, lib.HexEncodeLowerColon); got != "0f" {
t.Fatalf("single byte colon lower: expected %q, got %q", "0f", got)
}
if got := lib.HexEncode(b, lib.HexEncodeUpperColon); got != "0F" {
t.Fatalf("single byte colon upper: expected %q, got %q", "0F", got)
}
}
func TestHexEncode_InvalidModePanics(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Fatalf("expected panic for invalid mode, but function returned normally")
}
}()
// 0 is not a valid lib.HexEncodeMode (valid modes start at 1)
_ = lib.HexEncode([]byte{0x01}, lib.HexEncodeMode(0))
}

View File

@@ -1,42 +0,0 @@
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1
# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
testbuild:
working_directory: ~/repo
# Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
# See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
docker:
- image: cimg/go:1.22.2
# Add steps to the job
# See: https://circleci.com/docs/2.0/configuration-reference/#steps
steps:
- checkout
- restore_cache:
keys:
- go-mod-v4-{{ checksum "go.sum" }}
- run:
name: Install Dependencies
command: go mod download
- save_cache:
key: go-mod-v4-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
- run:
name: Run tests
command: go test ./...
- run:
name: Run build
command: go build ./...
- store_test_results:
path: /tmp/test-reports
# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
workflows:
testbuild:
jobs:
- testbuild

View File

@@ -1,4 +1,4 @@
// twofactor implements two-factor authentication.
// Package twofactor implements two-factor authentication.
//
// Currently supported are RFC 4226 HOTP one-time passwords and
// RFC 6238 TOTP SHA-1 one-time passwords.

View File

@@ -2,7 +2,7 @@ package twofactor
import (
"crypto"
"crypto/sha1"
"crypto/sha1" // #nosec G505 - required by RFC
"encoding/base32"
"io"
"net/url"
@@ -15,11 +15,6 @@ type HOTP struct {
*OATH
}
// Type returns OATH_HOTP.
func (otp *HOTP) Type() Type {
return OATH_HOTP
}
// NewHOTP takes the key, the initial counter value, and the number
// of digits (typically 6 or 8) and returns a new HOTP instance.
func NewHOTP(key []byte, counter uint64, digits int) *HOTP {
@@ -34,6 +29,11 @@ func NewHOTP(key []byte, counter uint64, digits int) *HOTP {
}
}
// Type returns OATH_HOTP.
func (otp *HOTP) Type() Type {
return OATH_HOTP
}
// OTP returns the next OTP and increments the counter.
func (otp *HOTP) OTP() string {
code := otp.OATH.OTP(otp.counter)
@@ -79,7 +79,7 @@ func hotpFromURL(u *url.URL) (*HOTP, string, error) {
digits = int(tmpDigits)
}
var counter uint64 = 0
var counter uint64
if scounter := v.Get("counter"); scounter != "" {
var err error
counter, err = strconv.ParseUint(scounter, 10, 64)
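
Editor's note: a minimal sketch of the HOTP API touched above, using the RFC 4226 test secret as the key (real deployments use a randomly generated key). NewHOTP, Counter, and OTP are as defined in this package.

``` go
package main

import (
	"fmt"

	"git.wntrmute.dev/kyle/goutils/twofactor"
)

func main() {
	// RFC 4226 test secret; purely illustrative.
	key := []byte("12345678901234567890")

	otp := twofactor.NewHOTP(key, 0, 6)
	for i := 0; i < 3; i++ {
		// OTP returns the code for the current counter and increments it.
		fmt.Printf("counter=%d code=%s\n", otp.Counter(), otp.OTP())
	}
}
```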

View File

@@ -1,7 +1,6 @@
package twofactor
import (
"fmt"
"testing"
)
@@ -25,22 +24,19 @@ var rfcHotpExpected = []string{
// ensures that this implementation is in compliance.
func TestHotpRFC(t *testing.T) {
otp := NewHOTP(rfcHotpKey, 0, 6)
for i := 0; i < len(rfcHotpExpected); i++ {
for i := range rfcHotpExpected {
if otp.Counter() != uint64(i) {
fmt.Printf("twofactor: invalid counter (should be %d, is %d",
t.Fatalf("twofactor: invalid counter (should be %d, is %d",
i, otp.Counter())
t.FailNow()
}
code := otp.OTP()
if code == "" {
fmt.Printf("twofactor: failed to produce an OTP\n")
t.FailNow()
t.Fatal("twofactor: failed to produce an OTP")
} else if code != rfcHotpExpected[i] {
fmt.Printf("twofactor: invalid OTP\n")
fmt.Printf("\tExpected: %s\n", rfcHotpExpected[i])
fmt.Printf("\t Actual: %s\n", code)
fmt.Printf("\t Counter: %d\n", otp.counter)
t.FailNow()
t.Logf("twofactor: invalid OTP\n")
t.Logf("\tExpected: %s\n", rfcHotpExpected[i])
t.Logf("\t Actual: %s\n", code)
t.Fatalf("\t Counter: %d\n", otp.counter)
}
}
}
@@ -50,15 +46,13 @@ func TestHotpRFC(t *testing.T) {
// expected.
func TestHotpBadRFC(t *testing.T) {
otp := NewHOTP(testKey, 0, 6)
for i := 0; i < len(rfcHotpExpected); i++ {
for i := range rfcHotpExpected {
code := otp.OTP()
switch code {
case "":
fmt.Printf("twofactor: failed to produce an OTP\n")
t.FailNow()
t.Error("twofactor: failed to produce an OTP")
case rfcHotpExpected[i]:
fmt.Printf("twofactor: should not have received a valid OTP\n")
t.FailNow()
t.Error("twofactor: should not have received a valid OTP")
}
}
}

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"hash"
"net/url"
"strconv"
"rsc.io/qr"
)
@@ -25,12 +26,12 @@ type OATH struct {
}
// Size returns the output size (in characters) of the password.
func (o OATH) Size() int {
func (o *OATH) Size() int {
return o.size
}
// Counter returns the OATH token's counter.
func (o OATH) Counter() uint64 {
func (o *OATH) Counter() uint64 {
return o.counter
}
@@ -40,18 +41,18 @@ func (o *OATH) SetCounter(counter uint64) {
}
// Key returns the token's secret key.
func (o OATH) Key() []byte {
return o.key[:]
func (o *OATH) Key() []byte {
return o.key
}
// Hash returns the token's hash function.
func (o OATH) Hash() func() hash.Hash {
func (o *OATH) Hash() func() hash.Hash {
return o.hash
}
// URL constructs a URL appropriate for the token (i.e. for use in a
// QR code).
func (o OATH) URL(t Type, label string) string {
func (o *OATH) URL(t Type, label string) string {
secret := base32.StdEncoding.EncodeToString(o.key)
u := url.URL{}
v := url.Values{}
@@ -65,10 +66,10 @@ func (o OATH) URL(t Type, label string) string {
u.Path = label
v.Add("secret", secret)
if o.Counter() != 0 && t == OATH_HOTP {
v.Add("counter", fmt.Sprintf("%d", o.Counter()))
v.Add("counter", strconv.FormatUint(o.Counter(), 10))
}
if o.Size() != defaultSize {
v.Add("digits", fmt.Sprintf("%d", o.Size()))
v.Add("digits", strconv.Itoa(o.Size()))
}
switch o.algo {
@@ -84,7 +85,6 @@ func (o OATH) URL(t Type, label string) string {
u.RawQuery = v.Encode()
return u.String()
}
var digits = []int64{
@@ -101,10 +101,10 @@ var digits = []int64{
10: 10000000000,
}
// The top-level type should provide a counter; for example, HOTP
// OTP top-level type should provide a counter; for example, HOTP
// will provide the counter directly while TOTP will provide the
// time-stepped counter.
func (o OATH) OTP(counter uint64) string {
func (o *OATH) OTP(counter uint64) string {
var ctr [8]byte
binary.BigEndian.PutUint64(ctr[:], counter)
@@ -140,7 +140,7 @@ func truncate(in []byte) int64 {
// QR generates a byte slice containing a QR code encoded as a
// PNG with level Q error correction.
func (o OATH) QR(t Type, label string) ([]byte, error) {
func (o *OATH) QR(t Type, label string) ([]byte, error) {
u := o.URL(t, label)
code, err := qr.Encode(u, qr.Q)
if err != nil {

View File

@@ -1,7 +1,6 @@
package twofactor
import (
"fmt"
"testing"
)
@@ -17,14 +16,12 @@ var truncExpect int64 = 0x50ef7f19
// This test runs through the truncation example given in the RFC.
func TestTruncate(t *testing.T) {
if result := truncate(sha1Hmac); result != truncExpect {
fmt.Printf("hotp: expected truncate -> %d, saw %d\n",
t.Fatalf("hotp: expected truncate -> %d, saw %d\n",
truncExpect, result)
t.FailNow()
}
sha1Hmac[19]++
if result := truncate(sha1Hmac); result == truncExpect {
fmt.Println("hotp: expected truncation to fail")
t.FailNow()
t.Fatal("hotp: expected truncation to fail")
}
}

View File

@@ -24,7 +24,7 @@ var (
ErrInvalidAlgo = errors.New("twofactor: invalid algorithm")
)
// Type OTP represents a one-time password token -- whether a
// OTP represents a one-time password token -- whether a
// software taken (as in the case of Google Authenticator) or a
// hardware token (as in the case of a YubiKey).
type OTP interface {
@@ -65,8 +65,8 @@ func otpString(otp OTP) string {
}
// FromURL constructs a new OTP token from a URL string.
func FromURL(URL string) (OTP, string, error) {
u, err := url.Parse(URL)
func FromURL(otpURL string) (OTP, string, error) {
u, err := url.Parse(otpURL)
if err != nil {
return nil, "", err
}

View File

@@ -1,7 +1,6 @@
package twofactor
import (
"fmt"
"io"
"testing"
)
@@ -10,8 +9,7 @@ func TestHOTPString(t *testing.T) {
hotp := NewHOTP(nil, 0, 6)
hotpString := otpString(hotp)
if hotpString != "OATH-HOTP, 6" {
fmt.Println("twofactor: invalid OTP string")
t.FailNow()
t.Fatal("twofactor: invalid OTP string")
}
}
@@ -23,35 +21,32 @@ func TestURL(t *testing.T) {
otp := NewHOTP(testKey, 0, 6)
url := otp.URL("testuser@foo")
otp2, id, err := FromURL(url)
if err != nil {
fmt.Printf("hotp: failed to parse HOTP URL\n")
t.FailNow()
} else if id != ident {
fmt.Printf("hotp: bad label\n")
fmt.Printf("\texpected: %s\n", ident)
fmt.Printf("\t actual: %s\n", id)
t.FailNow()
} else if otp2.Counter() != otp.Counter() {
fmt.Printf("hotp: OTP counters aren't synced\n")
fmt.Printf("\toriginal: %d\n", otp.Counter())
fmt.Printf("\t second: %d\n", otp2.Counter())
t.FailNow()
switch {
case err != nil:
t.Fatal("hotp: failed to parse HOTP URL\n")
case id != ident:
t.Logf("hotp: bad label\n")
t.Logf("\texpected: %s\n", ident)
t.Fatalf("\t actual: %s\n", id)
case otp2.Counter() != otp.Counter():
t.Logf("hotp: OTP counters aren't synced\n")
t.Logf("\toriginal: %d\n", otp.Counter())
t.Fatalf("\t second: %d\n", otp2.Counter())
}
code1 := otp.OTP()
code2 := otp2.OTP()
if code1 != code2 {
fmt.Printf("hotp: mismatched OTPs\n")
fmt.Printf("\texpected: %s\n", code1)
fmt.Printf("\t actual: %s\n", code2)
t.Logf("hotp: mismatched OTPs\n")
t.Logf("\texpected: %s\n", code1)
t.Fatalf("\t actual: %s\n", code2)
}
// There's not much we can do test the QR code, except to
// ensure it doesn't fail.
_, err = otp.QR(ident)
if err != nil {
fmt.Printf("hotp: failed to generate QR code PNG (%v)\n", err)
t.FailNow()
t.Fatalf("hotp: failed to generate QR code PNG (%v)\n", err)
}
// This should fail because the maximum size of an alphanumeric
@@ -63,16 +58,14 @@ func TestURL(t *testing.T) {
var tooBigIdent = make([]byte, 8192)
_, err = io.ReadFull(PRNG, tooBigIdent)
if err != nil {
fmt.Printf("hotp: failed to read identity (%v)\n", err)
t.FailNow()
t.Fatalf("hotp: failed to read identity (%v)\n", err)
} else if _, err = otp.QR(string(tooBigIdent)); err == nil {
fmt.Println("hotp: QR code should fail to encode oversized URL")
t.FailNow()
t.Fatal("hotp: QR code should fail to encode oversized URL")
}
}
// This test makes sure we can generate codes for padded and non-padded
// entries
// entries.
func TestPaddedURL(t *testing.T) {
var urlList = []string{
"otpauth://hotp/?secret=ME",
@@ -95,17 +88,15 @@ func TestPaddedURL(t *testing.T) {
for i := range urlList {
if o, id, err := FromURL(urlList[i]); err != nil {
fmt.Println("hotp: URL should have parsed successfully (id=", id, ")")
fmt.Printf("\turl was: %s\n", urlList[i])
t.FailNow()
fmt.Printf("\t%s, %s\n", o.OTP(), id)
t.Log("hotp: URL should have parsed successfully (id=", id, ")")
t.Logf("\turl was: %s\n", urlList[i])
t.Fatalf("\t%s, %s\n", o.OTP(), id)
} else {
code2 := o.OTP()
if code2 != codeList[i] {
fmt.Printf("hotp: mismatched OTPs\n")
fmt.Printf("\texpected: %s\n", codeList[i])
fmt.Printf("\t actual: %s\n", code2)
t.FailNow()
t.Logf("hotp: mismatched OTPs\n")
t.Logf("\texpected: %s\n", codeList[i])
t.Fatalf("\t actual: %s\n", code2)
}
}
}
@@ -128,9 +119,8 @@ func TestBadURL(t *testing.T) {
for i := range urlList {
if _, _, err := FromURL(urlList[i]); err == nil {
fmt.Println("hotp: URL should not have parsed successfully")
fmt.Printf("\turl was: %s\n", urlList[i])
t.FailNow()
t.Log("hotp: URL should not have parsed successfully")
t.Fatalf("\turl was: %s\n", urlList[i])
}
}
}

View File

@@ -2,7 +2,7 @@ package twofactor
import (
"crypto"
"crypto/sha1"
"crypto/sha1" // #nosec G505 - required by RFC
"crypto/sha256"
"crypto/sha512"
"encoding/base32"
@@ -23,6 +23,42 @@ type TOTP struct {
step uint64
}
// NewTOTP takes a new key, a starting time, a step, the number of
// digits of output (typically 6 or 8) and the hash algorithm to
// use, and builds a new OTP.
func NewTOTP(key []byte, start uint64, step uint64, digits int, algo crypto.Hash) *TOTP {
h := hashFromAlgo(algo)
if h == nil {
return nil
}
return &TOTP{
OATH: &OATH{
key: key,
counter: start,
size: digits,
hash: h,
algo: algo,
},
step: step,
}
}
// NewGoogleTOTP takes a secret as a base32-encoded string and
// returns an appropriate Google Authenticator TOTP instance.
func NewGoogleTOTP(secret string) (*TOTP, error) {
key, err := base32.StdEncoding.DecodeString(secret)
if err != nil {
return nil, err
}
return NewTOTP(key, 0, 30, 6, crypto.SHA1), nil
}
// NewTOTPSHA1 will build a new TOTP using SHA-1.
func NewTOTPSHA1(key []byte, start uint64, step uint64, digits int) *TOTP {
return NewTOTP(key, start, step, digits, crypto.SHA1)
}
// Type returns OATH_TOTP.
func (otp *TOTP) Type() Type {
return OATH_TOTP
@@ -53,34 +89,7 @@ func (otp *TOTP) otpCounter(t uint64) uint64 {
// OTPCounter returns the current time value for the OTP.
func (otp *TOTP) OTPCounter() uint64 {
return otp.otpCounter(uint64(timeSource.Now().Unix()))
}
// NewTOTP takes a new key, a starting time, a step, the number of
// digits of output (typically 6 or 8) and the hash algorithm to
// use, and builds a new OTP.
func NewTOTP(key []byte, start uint64, step uint64, digits int, algo crypto.Hash) *TOTP {
h := hashFromAlgo(algo)
if h == nil {
return nil
}
return &TOTP{
OATH: &OATH{
key: key,
counter: start,
size: digits,
hash: h,
algo: algo,
},
step: step,
}
}
// NewTOTPSHA1 will build a new TOTP using SHA-1.
func NewTOTPSHA1(key []byte, start uint64, step uint64, digits int) *TOTP {
return NewTOTP(key, start, step, digits, crypto.SHA1)
return otp.otpCounter(uint64(timeSource.Now().Unix() & 0x7FFFFFFF)) //#nosec G115 - masked out overflow bits
}
func hashFromAlgo(algo crypto.Hash) func() hash.Hash {
@@ -105,16 +114,6 @@ func GenerateGoogleTOTP() *TOTP {
return NewTOTP(key, 0, 30, 6, crypto.SHA1)
}
// NewGoogleTOTP takes a secret as a base32-encoded string and
// returns an appropriate Google Authenticator TOTP instance.
func NewGoogleTOTP(secret string) (*TOTP, error) {
key, err := base32.StdEncoding.DecodeString(secret)
if err != nil {
return nil, err
}
return NewTOTP(key, 0, 30, 6, crypto.SHA1), nil
}
func totpFromURL(u *url.URL) (*TOTP, string, error) {
label := u.Path[1:]
v := u.Query()
@@ -126,11 +125,12 @@ func totpFromURL(u *url.URL) (*TOTP, string, error) {
var algo = crypto.SHA1
if algorithm := v.Get("algorithm"); algorithm != "" {
if strings.EqualFold(algorithm, "SHA256") {
switch {
case strings.EqualFold(algorithm, "SHA256"):
algo = crypto.SHA256
} else if strings.EqualFold(algorithm, "SHA512") {
case strings.EqualFold(algorithm, "SHA512"):
algo = crypto.SHA512
} else if !strings.EqualFold(algorithm, "SHA1") {
case !strings.EqualFold(algorithm, "SHA1"):
return nil, "", ErrInvalidAlgo
}
}
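
Editor's note: similarly, a minimal sketch of the Google Authenticator helper reintroduced above. The secret is an illustrative base32 string; the code is generated through the embedded OATH type (mirroring how HOTP.OTP does it), since TOTP's higher-level helpers are not all shown in this diff.

``` go
package main

import (
	"fmt"
	"log"

	"git.wntrmute.dev/kyle/goutils/twofactor"
)

func main() {
	// Illustrative base32 secret; Google Authenticator setups provide their own.
	totp, err := twofactor.NewGoogleTOTP("GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ")
	if err != nil {
		log.Fatal(err)
	}
	// OTPCounter gives the current 30-second time step; the embedded OATH
	// turns that counter into a 6-digit code.
	fmt.Println(totp.OATH.OTP(totp.OTPCounter()))
}
```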

View File

@@ -2,7 +2,6 @@ package twofactor
import (
"crypto"
"fmt"
"testing"
"time"
@@ -14,6 +13,7 @@ var rfcTotpKey = map[crypto.Hash][]byte{
crypto.SHA256: []byte("12345678901234567890123456789012"),
crypto.SHA512: []byte("1234567890123456789012345678901234567890123456789012345678901234"),
}
var rfcTotpStep uint64 = 30
var rfcTotpTests = []struct {
@@ -46,17 +46,15 @@ func TestTotpRFC(t *testing.T) {
for _, tc := range rfcTotpTests {
otp := NewTOTP(rfcTotpKey[tc.Algo], 0, rfcTotpStep, 8, tc.Algo)
if otp.otpCounter(tc.Time) != tc.T {
fmt.Printf("twofactor: invalid TOTP (t=%d, h=%d)\n", tc.Time, tc.Algo)
fmt.Printf("\texpected: %d\n", tc.T)
fmt.Printf("\t actual: %d\n", otp.otpCounter(tc.Time))
t.Fail()
t.Logf("twofactor: invalid TOTP (t=%d, h=%d)\n", tc.Time, tc.Algo)
t.Logf("\texpected: %d\n", tc.T)
t.Errorf("\t actual: %d\n", otp.otpCounter(tc.Time))
}
if code := otp.otp(otp.otpCounter(tc.Time)); code != tc.Code {
fmt.Printf("twofactor: invalid TOTP (t=%d, h=%d)\n", tc.Time, tc.Algo)
fmt.Printf("\texpected: %s\n", tc.Code)
fmt.Printf("\t actual: %s\n", code)
t.Fail()
t.Logf("twofactor: invalid TOTP (t=%d, h=%d)\n", tc.Time, tc.Algo)
t.Logf("\texpected: %s\n", tc.Code)
t.Errorf("\t actual: %s\n", code)
}
}
}

View File

@@ -5,7 +5,7 @@ import (
)
// Pad calculates the number of '='s to add to our encoded string
// to make base32.StdEncoding.DecodeString happy
// to make base32.StdEncoding.DecodeString happy.
func Pad(s string) string {
if !strings.HasSuffix(s, "=") && len(s)%8 != 0 {
for len(s)%8 != 0 {

View File

@@ -1,11 +1,12 @@
package twofactor
package twofactor_test
import (
"encoding/base32"
"fmt"
"math/rand"
"strings"
"testing"
"git.wntrmute.dev/kyle/goutils/twofactor"
)
const letters = "1234567890!@#$%^&*()abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -19,34 +20,31 @@ func randString() string {
}
func TestPadding(t *testing.T) {
for i := 0; i < 300; i++ {
for range 300 {
b := randString()
origEncoding := string(b)
modEncoding := strings.ReplaceAll(string(b), "=", "")
origEncoding := b
modEncoding := strings.ReplaceAll(b, "=", "")
str, err := base32.StdEncoding.DecodeString(origEncoding)
if err != nil {
fmt.Println("Can't decode: ", string(b))
t.FailNow()
t.Fatal("Can't decode: ", b)
}
paddedEncoding := Pad(modEncoding)
paddedEncoding := twofactor.Pad(modEncoding)
if origEncoding != paddedEncoding {
fmt.Println("Padding failed:")
fmt.Printf("Expected: '%s'", origEncoding)
fmt.Printf("Got: '%s'", paddedEncoding)
t.FailNow()
t.Log("Padding failed:")
t.Logf("Expected: '%s'", origEncoding)
t.Fatalf("Got: '%s'", paddedEncoding)
} else {
mstr, err := base32.StdEncoding.DecodeString(paddedEncoding)
var mstr []byte
mstr, err = base32.StdEncoding.DecodeString(paddedEncoding)
if err != nil {
fmt.Println("Can't decode: ", paddedEncoding)
t.FailNow()
t.Fatal("Can't decode: ", paddedEncoding)
}
if string(mstr) != string(str) {
fmt.Println("Re-padding failed:")
fmt.Printf("Expected: '%s'", str)
fmt.Printf("Got: '%s'", mstr)
t.FailNow()
t.Log("Re-padding failed:")
t.Logf("Expected: '%s'", str)
t.Fatalf("Got: '%s'", mstr)
}
}
}