Add Nix flake for mcproxyctl
Vendor dependencies and expose mcproxyctl binary via nix build. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
Normal file
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
Copyright (C) 2013 Blake Mizerany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
Normal file
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
Normal file
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
Normal file
@@ -0,0 +1,316 @@
|
||||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
||||
|
||||
// Samples represents a slice of samples. It implements sort.Interface.
|
||||
type Samples []Sample
|
||||
|
||||
func (a Samples) Len() int { return len(a) }
|
||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
||||
// Convert map to slice to avoid slow iterations on a map.
|
||||
// ƒ is called on the hot path, so converting the map to a slice
|
||||
// beforehand results in significant CPU savings.
|
||||
targets := targetMapToSlice(targetMap)
|
||||
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for _, t := range targets {
|
||||
if t.quantile*s.n <= r {
|
||||
f = (2 * t.epsilon * r) / t.quantile
|
||||
} else {
|
||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
type target struct {
|
||||
quantile float64
|
||||
epsilon float64
|
||||
}
|
||||
|
||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
||||
targets := make([]target, 0, len(targetMap))
|
||||
|
||||
for quantile, epsilon := range targetMap {
|
||||
t := target{
|
||||
quantile: quantile,
|
||||
epsilon: epsilon,
|
||||
}
|
||||
targets = append(targets, t)
|
||||
}
|
||||
|
||||
return targets
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
*stream
|
||||
b Samples
|
||||
sorted bool
|
||||
}
|
||||
|
||||
func newStream(ƒ invariant) *Stream {
|
||||
x := &stream{ƒ: ƒ}
|
||||
return &Stream{x, make(Samples, 0, 500), true}
|
||||
}
|
||||
|
||||
// Insert inserts v into the stream.
|
||||
func (s *Stream) Insert(v float64) {
|
||||
s.insert(Sample{Value: v, Width: 1})
|
||||
}
|
||||
|
||||
func (s *Stream) insert(sample Sample) {
|
||||
s.b = append(s.b, sample)
|
||||
s.sorted = false
|
||||
if len(s.b) == cap(s.b) {
|
||||
s.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Query returns the computed qth percentiles value. If s was created with
|
||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
||||
// will return an unspecified result.
|
||||
func (s *Stream) Query(q float64) float64 {
|
||||
if !s.flushed() {
|
||||
// Fast path when there hasn't been enough data for a flush;
|
||||
// this also yields better accuracy for small sets of data.
|
||||
l := len(s.b)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(math.Ceil(float64(l) * q))
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
s.maybeSort()
|
||||
return s.b[i].Value
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.query(q)
|
||||
}
|
||||
|
||||
// Merge merges samples into the underlying streams samples. This is handy when
|
||||
// merging multiple streams from separate threads, database shards, etc.
|
||||
//
|
||||
// ATTENTION: This method is broken and does not yield correct results. The
|
||||
// underlying algorithm is not capable of merging streams correctly.
|
||||
func (s *Stream) Merge(samples Samples) {
|
||||
sort.Sort(samples)
|
||||
s.stream.merge(samples)
|
||||
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
||||
func (s *Stream) Reset() {
|
||||
s.stream.reset()
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
// Samples returns stream samples held by s.
|
||||
func (s *Stream) Samples() Samples {
|
||||
if !s.flushed() {
|
||||
return s.b
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.samples()
|
||||
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
|
||||
// since initialization.
|
||||
func (s *Stream) Count() int {
|
||||
return len(s.b) + s.stream.count()
|
||||
}
|
||||
|
||||
func (s *Stream) flush() {
|
||||
s.maybeSort()
|
||||
s.stream.merge(s.b)
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
func (s *Stream) maybeSort() {
|
||||
if !s.sorted {
|
||||
s.sorted = true
|
||||
sort.Sort(s.b)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) flushed() bool {
|
||||
return len(s.stream.l) > 0
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
n float64
|
||||
l []Sample
|
||||
ƒ invariant
|
||||
}
|
||||
|
||||
func (s *stream) reset() {
|
||||
s.l = s.l[:0]
|
||||
s.n = 0
|
||||
}
|
||||
|
||||
func (s *stream) insert(v float64) {
|
||||
s.merge(Samples{{v, 1, 0}})
|
||||
}
|
||||
|
||||
func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
|
||||
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
Normal file
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Copyright (c) 2016 Caleb Spare
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
74
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
Normal file
74
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
# xxhash
|
||||
|
||||
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
||||
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
|
||||
high-quality hashing algorithm that is much faster than anything in the Go
|
||||
standard library.
|
||||
|
||||
This package provides a straightforward API:
|
||||
|
||||
```
|
||||
func Sum64(b []byte) uint64
|
||||
func Sum64String(s string) uint64
|
||||
type Digest struct{ ... }
|
||||
func New() *Digest
|
||||
```
|
||||
|
||||
The `Digest` type implements hash.Hash64. Its key methods are:
|
||||
|
||||
```
|
||||
func (*Digest) Write([]byte) (int, error)
|
||||
func (*Digest) WriteString(string) (int, error)
|
||||
func (*Digest) Sum64() uint64
|
||||
```
|
||||
|
||||
The package is written with optimized pure Go and also contains even faster
|
||||
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
|
||||
opts into using the Go code even on those architectures.
|
||||
|
||||
[xxHash]: http://cyan4973.github.io/xxHash/
|
||||
|
||||
## Compatibility
|
||||
|
||||
This package is in a module and the latest code is in version 2 of the module.
|
||||
You need a version of Go with at least "minimal module compatibility" to use
|
||||
github.com/cespare/xxhash/v2:
|
||||
|
||||
* 1.9.7+ for Go 1.9
|
||||
* 1.10.3+ for Go 1.10
|
||||
* Go 1.11 or later
|
||||
|
||||
I recommend using the latest release of Go.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64.
|
||||
|
||||
| input size | purego | asm |
|
||||
| ---------- | --------- | --------- |
|
||||
| 4 B | 1.3 GB/s | 1.2 GB/s |
|
||||
| 16 B | 2.9 GB/s | 3.5 GB/s |
|
||||
| 100 B | 6.9 GB/s | 8.1 GB/s |
|
||||
| 4 KB | 11.7 GB/s | 16.7 GB/s |
|
||||
| 10 MB | 12.0 GB/s | 17.3 GB/s |
|
||||
|
||||
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
|
||||
CPU using the following commands under Go 1.19.2:
|
||||
|
||||
```
|
||||
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
- [FreeCache](https://github.com/coocood/freecache)
|
||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||
- [Ristretto](https://github.com/dgraph-io/ristretto)
|
||||
- [Badger](https://github.com/dgraph-io/badger)
|
||||
10
vendor/github.com/cespare/xxhash/v2/testall.sh
generated
vendored
Normal file
10
vendor/github.com/cespare/xxhash/v2/testall.sh
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -eu -o pipefail
|
||||
|
||||
# Small convenience script for running the tests with various combinations of
|
||||
# arch/tags. This assumes we're running on amd64 and have qemu available.
|
||||
|
||||
go test ./...
|
||||
go test -tags purego ./...
|
||||
GOARCH=arm64 go test
|
||||
GOARCH=arm64 go test -tags purego
|
||||
243
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
Normal file
243
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
Normal file
@@ -0,0 +1,243 @@
|
||||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
// at http://cyan4973.github.io/xxHash/.
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const (
|
||||
prime1 uint64 = 11400714785074694791
|
||||
prime2 uint64 = 14029467366897019727
|
||||
prime3 uint64 = 1609587929392839161
|
||||
prime4 uint64 = 9650029242287828579
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array for the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
//
|
||||
// Note that a zero-valued Digest is not ready to receive writes.
|
||||
// Call Reset or create a Digest using New before calling other methods.
|
||||
type Digest struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
v3 uint64
|
||||
v4 uint64
|
||||
total uint64
|
||||
mem [32]byte
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new Digest with a zero seed.
|
||||
func New() *Digest {
|
||||
return NewWithSeed(0)
|
||||
}
|
||||
|
||||
// NewWithSeed creates a new Digest with the given seed.
|
||||
func NewWithSeed(seed uint64) *Digest {
|
||||
var d Digest
|
||||
d.ResetWithSeed(seed)
|
||||
return &d
|
||||
}
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
// It uses a seed value of zero.
|
||||
func (d *Digest) Reset() {
|
||||
d.ResetWithSeed(0)
|
||||
}
|
||||
|
||||
// ResetWithSeed clears the Digest's state so that it can be reused.
|
||||
// It uses the given seed to initialize the state.
|
||||
func (d *Digest) ResetWithSeed(seed uint64) {
|
||||
d.v1 = seed + prime1 + prime2
|
||||
d.v2 = seed + prime2
|
||||
d.v3 = seed
|
||||
d.v4 = seed - prime1
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
// Size always returns 8 bytes.
|
||||
func (d *Digest) Size() int { return 8 }
|
||||
|
||||
// BlockSize always returns 32 bytes.
|
||||
func (d *Digest) BlockSize() int { return 32 }
|
||||
|
||||
// Write adds more data to d. It always returns len(b), nil.
|
||||
func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(memleft, b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
c := copy(memleft, b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[c:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
if len(b) >= 32 {
|
||||
// One or more full blocks left.
|
||||
nw := writeBlocks(d, b)
|
||||
b = b[nw:]
|
||||
}
|
||||
|
||||
// Store any remaining partial block.
|
||||
copy(d.mem[:], b)
|
||||
d.n = len(b)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
func (d *Digest) Sum(b []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(
|
||||
b,
|
||||
byte(s>>56),
|
||||
byte(s>>48),
|
||||
byte(s>>40),
|
||||
byte(s>>32),
|
||||
byte(s>>24),
|
||||
byte(s>>16),
|
||||
byte(s>>8),
|
||||
byte(s),
|
||||
)
|
||||
}
|
||||
|
||||
// Sum64 returns the current hash.
|
||||
func (d *Digest) Sum64() uint64 {
|
||||
var h uint64
|
||||
|
||||
if d.total >= 32 {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = d.v3 + prime5
|
||||
}
|
||||
|
||||
h += d.total
|
||||
|
||||
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
b = b[4:]
|
||||
}
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
const (
|
||||
magic = "xxh\x06"
|
||||
marshaledSize = len(magic) + 8*5 + 32
|
||||
)
|
||||
|
||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, 0, marshaledSize)
|
||||
b = append(b, magic...)
|
||||
b = appendUint64(b, d.v1)
|
||||
b = appendUint64(b, d.v2)
|
||||
b = appendUint64(b, d.v3)
|
||||
b = appendUint64(b, d.v4)
|
||||
b = appendUint64(b, d.total)
|
||||
b = append(b, d.mem[:d.n]...)
|
||||
b = b[:len(b)+len(d.mem)-d.n]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
||||
return errors.New("xxhash: invalid hash state identifier")
|
||||
}
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("xxhash: invalid hash state size")
|
||||
}
|
||||
b = b[len(magic):]
|
||||
b, d.v1 = consumeUint64(b)
|
||||
b, d.v2 = consumeUint64(b)
|
||||
b, d.v3 = consumeUint64(b)
|
||||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func appendUint64(b []byte, x uint64) []byte {
|
||||
var a [8]byte
|
||||
binary.LittleEndian.PutUint64(a[:], x)
|
||||
return append(b, a[:]...)
|
||||
}
|
||||
|
||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
||||
x := u64(b)
|
||||
return b[8:], x
|
||||
}
|
||||
|
||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
||||
|
||||
func round(acc, input uint64) uint64 {
|
||||
acc += input * prime2
|
||||
acc = rol31(acc)
|
||||
acc *= prime1
|
||||
return acc
|
||||
}
|
||||
|
||||
func mergeRound(acc, val uint64) uint64 {
|
||||
val = round(0, val)
|
||||
acc ^= val
|
||||
acc = acc*prime1 + prime4
|
||||
return acc
|
||||
}
|
||||
|
||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
||||
209
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
Normal file
209
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define h AX
|
||||
#define d AX
|
||||
#define p SI // pointer to advance through b
|
||||
#define n DX
|
||||
#define end BX // loop end
|
||||
#define v1 R8
|
||||
#define v2 R9
|
||||
#define v3 R10
|
||||
#define v4 R11
|
||||
#define x R12
|
||||
#define prime1 R13
|
||||
#define prime2 R14
|
||||
#define prime4 DI
|
||||
|
||||
#define round(acc, x) \
|
||||
IMULQ prime2, x \
|
||||
ADDQ x, acc \
|
||||
ROLQ $31, acc \
|
||||
IMULQ prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
IMULQ prime2, x \
|
||||
ROLQ $31, x \
|
||||
IMULQ prime1, x
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and x.
|
||||
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
XORQ x, acc \
|
||||
IMULQ prime1, acc \
|
||||
ADDQ prime4, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||
// to process.
|
||||
#define blockLoop() \
|
||||
loop: \
|
||||
MOVQ +0(p), x \
|
||||
round(v1, x) \
|
||||
MOVQ +8(p), x \
|
||||
round(v2, x) \
|
||||
MOVQ +16(p), x \
|
||||
round(v3, x) \
|
||||
MOVQ +24(p), x \
|
||||
round(v4, x) \
|
||||
ADDQ $32, p \
|
||||
CMPQ p, end \
|
||||
JLE loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·primes+24(SB), prime4
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), p
|
||||
MOVQ b_len+8(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, end
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ n, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ prime1, v1
|
||||
ADDQ prime2, v1
|
||||
MOVQ prime2, v2
|
||||
XORQ v3, v3
|
||||
XORQ v4, v4
|
||||
SUBQ prime1, v4
|
||||
|
||||
blockLoop()
|
||||
|
||||
MOVQ v1, h
|
||||
ROLQ $1, h
|
||||
MOVQ v2, x
|
||||
ROLQ $7, x
|
||||
ADDQ x, h
|
||||
MOVQ v3, x
|
||||
ROLQ $12, x
|
||||
ADDQ x, h
|
||||
MOVQ v4, x
|
||||
ROLQ $18, x
|
||||
ADDQ x, h
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·primes+32(SB), h
|
||||
|
||||
afterBlocks:
|
||||
ADDQ n, h
|
||||
|
||||
ADDQ $24, end
|
||||
CMPQ p, end
|
||||
JG try4
|
||||
|
||||
loop8:
|
||||
MOVQ (p), x
|
||||
ADDQ $8, p
|
||||
round0(x)
|
||||
XORQ x, h
|
||||
ROLQ $27, h
|
||||
IMULQ prime1, h
|
||||
ADDQ prime4, h
|
||||
|
||||
CMPQ p, end
|
||||
JLE loop8
|
||||
|
||||
try4:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JG try1
|
||||
|
||||
MOVL (p), x
|
||||
ADDQ $4, p
|
||||
IMULQ prime1, x
|
||||
XORQ x, h
|
||||
|
||||
ROLQ $23, h
|
||||
IMULQ prime2, h
|
||||
ADDQ ·primes+16(SB), h
|
||||
|
||||
try1:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JGE finalize
|
||||
|
||||
loop1:
|
||||
MOVBQZX (p), x
|
||||
ADDQ $1, p
|
||||
IMULQ ·primes+32(SB), x
|
||||
XORQ x, h
|
||||
ROLQ $11, h
|
||||
IMULQ prime1, h
|
||||
|
||||
CMPQ p, end
|
||||
JL loop1
|
||||
|
||||
finalize:
|
||||
MOVQ h, x
|
||||
SHRQ $33, x
|
||||
XORQ x, h
|
||||
IMULQ prime2, h
|
||||
MOVQ h, x
|
||||
SHRQ $29, x
|
||||
XORQ x, h
|
||||
IMULQ ·primes+16(SB), h
|
||||
MOVQ h, x
|
||||
SHRQ $32, x
|
||||
XORQ x, h
|
||||
|
||||
MOVQ h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), p
|
||||
MOVQ b_len+16(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
SUBQ $32, end
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ s+0(FP), d
|
||||
MOVQ 0(d), v1
|
||||
MOVQ 8(d), v2
|
||||
MOVQ 16(d), v3
|
||||
MOVQ 24(d), v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop()
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ v1, 0(d)
|
||||
MOVQ v2, 8(d)
|
||||
MOVQ v3, 16(d)
|
||||
MOVQ v4, 24(d)
|
||||
|
||||
// The number of bytes written is p minus the old base pointer.
|
||||
SUBQ b_base+8(FP), p
|
||||
MOVQ p, ret+32(FP)
|
||||
|
||||
RET
|
||||
183
vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
generated
vendored
Normal file
183
vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define digest R1
|
||||
#define h R2 // return value
|
||||
#define p R3 // input pointer
|
||||
#define n R4 // input length
|
||||
#define nblocks R5 // n / 32
|
||||
#define prime1 R7
|
||||
#define prime2 R8
|
||||
#define prime3 R9
|
||||
#define prime4 R10
|
||||
#define prime5 R11
|
||||
#define v1 R12
|
||||
#define v2 R13
|
||||
#define v3 R14
|
||||
#define v4 R15
|
||||
#define x1 R20
|
||||
#define x2 R21
|
||||
#define x3 R22
|
||||
#define x4 R23
|
||||
|
||||
#define round(acc, x) \
|
||||
MADD prime2, acc, x, acc \
|
||||
ROR $64-31, acc \
|
||||
MUL prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
MUL prime2, x \
|
||||
ROR $64-31, x \
|
||||
MUL prime1, x
|
||||
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
EOR x, acc \
|
||||
MADD acc, prime4, prime1, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||
#define blockLoop() \
|
||||
LSR $5, n, nblocks \
|
||||
PCALIGN $16 \
|
||||
loop: \
|
||||
LDP.P 16(p), (x1, x2) \
|
||||
LDP.P 16(p), (x3, x4) \
|
||||
round(v1, x1) \
|
||||
round(v2, x2) \
|
||||
round(v3, x3) \
|
||||
round(v4, x4) \
|
||||
SUB $1, nblocks \
|
||||
CBNZ nblocks, loop
|
||||
|
||||
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
	LDP b_base+0(FP), (p, n)

	LDP  ·primes+0(SB), (prime1, prime2)
	LDP  ·primes+16(SB), (prime3, prime4)
	MOVD ·primes+32(SB), prime5

	CMP  $32, n
	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
	BLT  afterLoop

	// Initialize the four lane accumulators for the 32-byte block loop.
	ADD  prime1, prime2, v1
	MOVD prime2, v2
	MOVD $0, v3
	NEG  prime1, v4

	blockLoop()

	// Combine the four lanes: h = rol1(v1)+rol7(v2)+rol12(v3)+rol18(v4).
	ROR $64-1, v1, x1
	ROR $64-7, v2, x2
	ADD x1, x2
	ROR $64-12, v3, x3
	ROR $64-18, v4, x4
	ADD x3, x4
	ADD x2, x4, h

	mergeRound(h, v1)
	mergeRound(h, v2)
	mergeRound(h, v3)
	mergeRound(h, v4)

afterLoop:
	ADD n, h

	// Consume the remaining tail: 16, then 8, 4, 2, and 1 bytes,
	// gated by the corresponding bits of n.
	TBZ   $4, n, try8
	LDP.P 16(p), (x1, x2)

	round0(x1)

	// NOTE: here and below, sequencing the EOR after the ROR (using a
	// rotated register) is worth a small but measurable speedup for small
	// inputs.
	ROR  $64-27, h
	EOR  x1 @> 64-27, h, h
	MADD h, prime4, prime1, h

	round0(x2)
	ROR  $64-27, h
	EOR  x2 @> 64-27, h, h
	MADD h, prime4, prime1, h

try8:
	TBZ    $3, n, try4
	MOVD.P 8(p), x1

	round0(x1)
	ROR  $64-27, h
	EOR  x1 @> 64-27, h, h
	MADD h, prime4, prime1, h

try4:
	TBZ     $2, n, try2
	MOVWU.P 4(p), x2

	MUL  prime1, x2
	ROR  $64-23, h
	EOR  x2 @> 64-23, h, h
	MADD h, prime3, prime2, h

try2:
	TBZ     $1, n, try1
	MOVHU.P 2(p), x3
	AND     $255, x3, x1
	LSR     $8, x3, x2

	MUL prime5, x1
	ROR $64-11, h
	EOR x1 @> 64-11, h, h
	MUL prime1, h

	MUL prime5, x2
	ROR $64-11, h
	EOR x2 @> 64-11, h, h
	MUL prime1, h

try1:
	TBZ   $0, n, finalize
	MOVBU (p), x4

	MUL prime5, x4
	ROR $64-11, h
	EOR x4 @> 64-11, h, h
	MUL prime1, h

finalize:
	// XXH64 avalanche: xor-shift/multiply mixing of the final hash.
	EOR h >> 33, h
	MUL prime2, h
	EOR h >> 29, h
	MUL prime3, h
	EOR h >> 32, h

	MOVD h, ret+24(FP)
	RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
	LDP ·primes+0(SB), (prime1, prime2)

	// Load state. Assume v[1-4] are stored contiguously.
	MOVD d+0(FP), digest
	LDP  0(digest), (v1, v2)
	LDP  16(digest), (v3, v4)

	LDP b_base+8(FP), (p, n)

	blockLoop()

	// Store updated state.
	STP (v1, v2), 0(digest)
	STP (v3, v4), 16(digest)

	// Return n rounded down to a multiple of 32: the bytes consumed.
	BIC  $31, n
	MOVD n, ret+32(FP)
	RET
|
||||
15
vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
generated
vendored
Normal file
15
vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
//go:build (amd64 || arm64) && !appengine && gc && !purego
|
||||
// +build amd64 arm64
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
// Implemented in assembly (see the per-architecture .s files).
//
//go:noescape
func Sum64(b []byte) uint64

// writeBlocks consumes whole 32-byte blocks of b into d's state and
// returns the number of bytes consumed; implemented in assembly.
//
//go:noescape
func writeBlocks(d *Digest, b []byte) int
|
||||
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
Normal file
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
||||
// +build !amd64,!arm64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	// d := New()
	// d.Write(b)
	// return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		// Four-lane accumulation over 32-byte blocks (XXH64 main loop).
		v1 := primes[0] + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -primes[0]
		for len(b) >= 32 {
			// Full slice expressions (x:y:len(b)) help bounds-check
			// elimination on the fixed-offset loads.
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		// Combine and merge the four lanes into h.
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		// Short input: seed the hash with prime5 only.
		h = prime5
	}

	h += uint64(n)

	// Consume the remaining tail 8, then 4, then 1 bytes at a time.
	for ; len(b) >= 8; b = b[8:] {
		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if len(b) >= 4 {
		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
		b = b[4:]
	}
	for ; len(b) > 0; b = b[1:] {
		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
	}

	// Final avalanche mixing.
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}
|
||||
|
||||
// writeBlocks folds as many whole 32-byte blocks of b as possible into
// d's four accumulator lanes and returns the number of bytes consumed.
func writeBlocks(d *Digest, b []byte) int {
	// Work on locals so the compiler keeps the lanes in registers.
	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
	n := len(b)
	for len(b) >= 32 {
		v1 = round(v1, u64(b[0:8:len(b)]))
		v2 = round(v2, u64(b[8:16:len(b)]))
		v3 = round(v3, u64(b[16:24:len(b)]))
		v4 = round(v4, u64(b[24:32:len(b)]))
		b = b[32:len(b):len(b)]
	}
	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
	// Bytes consumed == original length minus the unconsumed tail.
	return n - len(b)
}
|
||||
16
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
Normal file
16
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build appengine
|
||||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
|
||||
func Sum64String(s string) uint64 {
|
||||
return Sum64([]byte(s))
|
||||
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
|
||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||
return d.Write([]byte(s))
|
||||
}
|
||||
58
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
Normal file
58
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
//go:build !appengine
|
||||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
// xxhash_safe.go contains the safe implementations.
|
||||
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// XxxString functions unnecessary by realizing that calls such as
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
|
||||
// If that happens, even if we keep these functions they can be replaced with
|
||||
// the trivial safe code.
|
||||
|
||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
||||
//
|
||||
// var b []byte
|
||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
// bh.Len = len(s)
|
||||
// bh.Cap = len(s)
|
||||
//
|
||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
||||
// weight to this sequence of expressions that any function that uses it will
|
||||
// not be inlined. Instead, the functions below use a different unsafe
|
||||
// conversion designed to minimize the inliner weight and allow both to be
|
||||
// inlined. There is also a test (TestInlining) which verifies that these are
|
||||
// inlined.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	// Reinterpret s's (ptr, len) words as a slice header with cap == len,
	// without copying the underlying bytes.
	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
	return Sum64(b)
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	// Zero-copy string-to-slice conversion; see sliceHeader below.
	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
	// d.Write always returns len(s), nil.
	// Ignoring the return output and returning these fixed values buys a
	// savings of 6 in the inliner's cost model.
	return len(s), nil
}
|
||||
|
||||
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
	s   string // supplies the data-pointer and length words
	cap int    // slice capacity; the conversions set it to len(s)
}
|
||||
21
vendor/github.com/dustin/go-humanize/.travis.yml
generated
vendored
Normal file
21
vendor/github.com/dustin/go-humanize/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go_import_path: github.com/dustin/go-humanize
|
||||
go:
|
||||
- 1.13.x
|
||||
- 1.14.x
|
||||
- 1.15.x
|
||||
- 1.16.x
|
||||
- stable
|
||||
- master
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
fast_finish: true
|
||||
install:
|
||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
||||
script:
|
||||
- diff -u <(echo -n) <(gofmt -d -s .)
|
||||
- go vet .
|
||||
- go install -v -race ./...
|
||||
- go test -v -race ./...
|
||||
21
vendor/github.com/dustin/go-humanize/LICENSE
generated
vendored
Normal file
21
vendor/github.com/dustin/go-humanize/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
<http://www.opensource.org/licenses/mit-license.php>
|
||||
124
vendor/github.com/dustin/go-humanize/README.markdown
generated
vendored
Normal file
124
vendor/github.com/dustin/go-humanize/README.markdown
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
# Humane Units [](https://travis-ci.org/dustin/go-humanize) [](https://godoc.org/github.com/dustin/go-humanize)
|
||||
|
||||
Just a few functions for helping humanize times and sizes.
|
||||
|
||||
`go get` it as `github.com/dustin/go-humanize`, import it as
|
||||
`"github.com/dustin/go-humanize"`, use it as `humanize`.
|
||||
|
||||
See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
|
||||
complete documentation.
|
||||
|
||||
## Sizes
|
||||
|
||||
This lets you take numbers like `82854982` and convert them to useful
|
||||
strings like, `83 MB` or `79 MiB` (whichever you prefer).
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
|
||||
```
|
||||
|
||||
## Times
|
||||
|
||||
This lets you take a `time.Time` and spit it out in relative terms.
|
||||
For example, `12 seconds ago` or `3 days from now`.
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
|
||||
```
|
||||
|
||||
Thanks to Kyle Lemons for the time implementation from an IRC
|
||||
conversation one day. It's pretty neat.
|
||||
|
||||
## Ordinals
|
||||
|
||||
From a [mailing list discussion][odisc] where a user wanted to be able
|
||||
to label ordinals.
|
||||
|
||||
0 -> 0th
|
||||
1 -> 1st
|
||||
2 -> 2nd
|
||||
3 -> 3rd
|
||||
4 -> 4th
|
||||
[...]
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
|
||||
```
|
||||
|
||||
## Commas
|
||||
|
||||
Want to shove commas into numbers? Be my guest.
|
||||
|
||||
0 -> 0
|
||||
100 -> 100
|
||||
1000 -> 1,000
|
||||
1000000000 -> 1,000,000,000
|
||||
-100000 -> -100,000
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
|
||||
```
|
||||
|
||||
## Ftoa
|
||||
|
||||
Nicer float64 formatter that removes trailing zeros.
|
||||
|
||||
```go
|
||||
fmt.Printf("%f", 2.24) // 2.240000
|
||||
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
|
||||
fmt.Printf("%f", 2.0) // 2.000000
|
||||
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
|
||||
```
|
||||
|
||||
## SI notation
|
||||
|
||||
Format numbers with [SI notation][sinotation].
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
humanize.SI(0.00000000223, "M") // 2.23 nM
|
||||
```
|
||||
|
||||
## English-specific functions
|
||||
|
||||
The following functions are in the `humanize/english` subpackage.
|
||||
|
||||
### Plurals
|
||||
|
||||
Simple English pluralization
|
||||
|
||||
```go
|
||||
english.PluralWord(1, "object", "") // object
|
||||
english.PluralWord(42, "object", "") // objects
|
||||
english.PluralWord(2, "bus", "") // buses
|
||||
english.PluralWord(99, "locus", "loci") // loci
|
||||
|
||||
english.Plural(1, "object", "") // 1 object
|
||||
english.Plural(42, "object", "") // 42 objects
|
||||
english.Plural(2, "bus", "") // 2 buses
|
||||
english.Plural(99, "locus", "loci") // 99 loci
|
||||
```
|
||||
|
||||
### Word series
|
||||
|
||||
Format comma-separated word lists with conjunctions:
|
||||
|
||||
```go
|
||||
english.WordSeries([]string{"foo"}, "and") // foo
|
||||
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
|
||||
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
|
||||
|
||||
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
|
||||
```
|
||||
|
||||
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
|
||||
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
|
||||
31
vendor/github.com/dustin/go-humanize/big.go
generated
vendored
Normal file
31
vendor/github.com/dustin/go-humanize/big.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// order of magnitude (to a max order)
|
||||
func oomm(n, b *big.Int, maxmag int) (float64, int) {
|
||||
mag := 0
|
||||
m := &big.Int{}
|
||||
for n.Cmp(b) >= 0 {
|
||||
n.DivMod(n, b, m)
|
||||
mag++
|
||||
if mag == maxmag && maxmag >= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||
}
|
||||
|
||||
// total order of magnitude
|
||||
// (same as above, but with no upper limit)
|
||||
func oom(n, b *big.Int) (float64, int) {
|
||||
mag := 0
|
||||
m := &big.Int{}
|
||||
for n.Cmp(b) >= 0 {
|
||||
n.DivMod(n, b, m)
|
||||
mag++
|
||||
}
|
||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||
}
|
||||
189
vendor/github.com/dustin/go-humanize/bigbytes.go
generated
vendored
Normal file
189
vendor/github.com/dustin/go-humanize/bigbytes.go
generated
vendored
Normal file
@@ -0,0 +1,189 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var (
	bigIECExp = big.NewInt(1024)

	// BigByte is one byte in big.Ints
	BigByte = big.NewInt(1)
	// BigKiByte is 1,024 bytes in big.Ints
	BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
	// BigMiByte is 1,024 k bytes in big.Ints
	BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
	// BigGiByte is 1,024 m bytes in big.Ints
	BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
	// BigTiByte is 1,024 g bytes in big.Ints
	BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
	// BigPiByte is 1,024 t bytes in big.Ints
	BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
	// BigEiByte is 1,024 p bytes in big.Ints
	BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
	// BigZiByte is 1,024 e bytes in big.Ints
	BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
	// BigYiByte is 1,024 z bytes in big.Ints
	BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
	// BigRiByte is 1,024 y bytes in big.Ints
	BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
	// BigQiByte is 1,024 r bytes in big.Ints
	BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
)
|
||||
|
||||
var (
|
||||
bigSIExp = big.NewInt(1000)
|
||||
|
||||
// BigSIByte is one SI byte in big.Ints
|
||||
BigSIByte = big.NewInt(1)
|
||||
// BigKByte is 1,000 SI bytes in big.Ints
|
||||
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
|
||||
// BigMByte is 1,000 SI k bytes in big.Ints
|
||||
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
|
||||
// BigGByte is 1,000 SI m bytes in big.Ints
|
||||
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
|
||||
// BigTByte is 1,000 SI g bytes in big.Ints
|
||||
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
|
||||
// BigPByte is 1,000 SI t bytes in big.Ints
|
||||
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
|
||||
// BigEByte is 1,000 SI p bytes in big.Ints
|
||||
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
|
||||
// BigZByte is 1,000 SI e bytes in big.Ints
|
||||
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
|
||||
// BigYByte is 1,000 SI z bytes in big.Ints
|
||||
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
|
||||
// BigRByte is 1,000 SI y bytes in big.Ints
|
||||
BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
|
||||
// BigQByte is 1,000 SI r bytes in big.Ints
|
||||
BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
|
||||
)
|
||||
|
||||
var bigBytesSizeTable = map[string]*big.Int{
|
||||
"b": BigByte,
|
||||
"kib": BigKiByte,
|
||||
"kb": BigKByte,
|
||||
"mib": BigMiByte,
|
||||
"mb": BigMByte,
|
||||
"gib": BigGiByte,
|
||||
"gb": BigGByte,
|
||||
"tib": BigTiByte,
|
||||
"tb": BigTByte,
|
||||
"pib": BigPiByte,
|
||||
"pb": BigPByte,
|
||||
"eib": BigEiByte,
|
||||
"eb": BigEByte,
|
||||
"zib": BigZiByte,
|
||||
"zb": BigZByte,
|
||||
"yib": BigYiByte,
|
||||
"yb": BigYByte,
|
||||
"rib": BigRiByte,
|
||||
"rb": BigRByte,
|
||||
"qib": BigQiByte,
|
||||
"qb": BigQByte,
|
||||
// Without suffix
|
||||
"": BigByte,
|
||||
"ki": BigKiByte,
|
||||
"k": BigKByte,
|
||||
"mi": BigMiByte,
|
||||
"m": BigMByte,
|
||||
"gi": BigGiByte,
|
||||
"g": BigGByte,
|
||||
"ti": BigTiByte,
|
||||
"t": BigTByte,
|
||||
"pi": BigPiByte,
|
||||
"p": BigPByte,
|
||||
"ei": BigEiByte,
|
||||
"e": BigEByte,
|
||||
"z": BigZByte,
|
||||
"zi": BigZiByte,
|
||||
"y": BigYByte,
|
||||
"yi": BigYiByte,
|
||||
"r": BigRByte,
|
||||
"ri": BigRiByte,
|
||||
"q": BigQByte,
|
||||
"qi": BigQiByte,
|
||||
}
|
||||
|
||||
var ten = big.NewInt(10)
|
||||
|
||||
func humanateBigBytes(s, base *big.Int, sizes []string) string {
|
||||
if s.Cmp(ten) < 0 {
|
||||
return fmt.Sprintf("%d B", s)
|
||||
}
|
||||
c := (&big.Int{}).Set(s)
|
||||
val, mag := oomm(c, base, len(sizes)-1)
|
||||
suffix := sizes[mag]
|
||||
f := "%.0f %s"
|
||||
if val < 10 {
|
||||
f = "%.1f %s"
|
||||
}
|
||||
|
||||
return fmt.Sprintf(f, val, suffix)
|
||||
|
||||
}
|
||||
|
||||
// BigBytes produces a human readable representation of an SI size.
|
||||
//
|
||||
// See also: ParseBigBytes.
|
||||
//
|
||||
// BigBytes(82854982) -> 83 MB
|
||||
func BigBytes(s *big.Int) string {
|
||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
|
||||
return humanateBigBytes(s, bigSIExp, sizes)
|
||||
}
|
||||
|
||||
// BigIBytes produces a human readable representation of an IEC size.
|
||||
//
|
||||
// See also: ParseBigBytes.
|
||||
//
|
||||
// BigIBytes(82854982) -> 79 MiB
|
||||
func BigIBytes(s *big.Int) string {
|
||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
|
||||
return humanateBigBytes(s, bigIECExp, sizes)
|
||||
}
|
||||
|
||||
// ParseBigBytes parses a string representation of bytes into the number
|
||||
// of bytes it represents.
|
||||
//
|
||||
// See also: BigBytes, BigIBytes.
|
||||
//
|
||||
// ParseBigBytes("42 MB") -> 42000000, nil
|
||||
// ParseBigBytes("42 mib") -> 44040192, nil
|
||||
func ParseBigBytes(s string) (*big.Int, error) {
|
||||
lastDigit := 0
|
||||
hasComma := false
|
||||
for _, r := range s {
|
||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||
break
|
||||
}
|
||||
if r == ',' {
|
||||
hasComma = true
|
||||
}
|
||||
lastDigit++
|
||||
}
|
||||
|
||||
num := s[:lastDigit]
|
||||
if hasComma {
|
||||
num = strings.Replace(num, ",", "", -1)
|
||||
}
|
||||
|
||||
val := &big.Rat{}
|
||||
_, err := fmt.Sscanf(num, "%f", val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||
if m, ok := bigBytesSizeTable[extra]; ok {
|
||||
mv := (&big.Rat{}).SetInt(m)
|
||||
val.Mul(val, mv)
|
||||
rv := &big.Int{}
|
||||
rv.Div(val.Num(), val.Denom())
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unhandled size name: %v", extra)
|
||||
}
|
||||
143
vendor/github.com/dustin/go-humanize/bytes.go
generated
vendored
Normal file
143
vendor/github.com/dustin/go-humanize/bytes.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// IEC Sizes.
|
||||
// kibis of bits
|
||||
const (
|
||||
Byte = 1 << (iota * 10)
|
||||
KiByte
|
||||
MiByte
|
||||
GiByte
|
||||
TiByte
|
||||
PiByte
|
||||
EiByte
|
||||
)
|
||||
|
||||
// SI Sizes.
|
||||
const (
|
||||
IByte = 1
|
||||
KByte = IByte * 1000
|
||||
MByte = KByte * 1000
|
||||
GByte = MByte * 1000
|
||||
TByte = GByte * 1000
|
||||
PByte = TByte * 1000
|
||||
EByte = PByte * 1000
|
||||
)
|
||||
|
||||
var bytesSizeTable = map[string]uint64{
|
||||
"b": Byte,
|
||||
"kib": KiByte,
|
||||
"kb": KByte,
|
||||
"mib": MiByte,
|
||||
"mb": MByte,
|
||||
"gib": GiByte,
|
||||
"gb": GByte,
|
||||
"tib": TiByte,
|
||||
"tb": TByte,
|
||||
"pib": PiByte,
|
||||
"pb": PByte,
|
||||
"eib": EiByte,
|
||||
"eb": EByte,
|
||||
// Without suffix
|
||||
"": Byte,
|
||||
"ki": KiByte,
|
||||
"k": KByte,
|
||||
"mi": MiByte,
|
||||
"m": MByte,
|
||||
"gi": GiByte,
|
||||
"g": GByte,
|
||||
"ti": TiByte,
|
||||
"t": TByte,
|
||||
"pi": PiByte,
|
||||
"p": PByte,
|
||||
"ei": EiByte,
|
||||
"e": EByte,
|
||||
}
|
||||
|
||||
// logn returns log base b of n, computed as a ratio of natural logs.
func logn(n, b float64) float64 {
	return math.Log(n) / math.Log(b)
}
|
||||
|
||||
// humanateBytes renders s with the given base (1000 or 1024) and the
// matching suffix table. Values under ten bytes print exactly.
func humanateBytes(s uint64, base float64, sizes []string) string {
	if s < 10 {
		return fmt.Sprintf("%d B", s)
	}
	// Order of magnitude of s in the given base (log ratio).
	exp := math.Floor(math.Log(float64(s)) / math.Log(base))
	// Scale down and round to one decimal place.
	val := math.Floor(float64(s)/math.Pow(base, exp)*10+0.5) / 10
	format := "%.0f %s"
	if val < 10 {
		format = "%.1f %s"
	}
	return fmt.Sprintf(format, val, sizes[int(exp)])
}
|
||||
|
||||
// Bytes produces a human readable representation of an SI size.
|
||||
//
|
||||
// See also: ParseBytes.
|
||||
//
|
||||
// Bytes(82854982) -> 83 MB
|
||||
func Bytes(s uint64) string {
|
||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
|
||||
return humanateBytes(s, 1000, sizes)
|
||||
}
|
||||
|
||||
// IBytes produces a human readable representation of an IEC size.
|
||||
//
|
||||
// See also: ParseBytes.
|
||||
//
|
||||
// IBytes(82854982) -> 79 MiB
|
||||
func IBytes(s uint64) string {
|
||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
|
||||
return humanateBytes(s, 1024, sizes)
|
||||
}
|
||||
|
||||
// ParseBytes parses a string representation of bytes into the number
|
||||
// of bytes it represents.
|
||||
//
|
||||
// See Also: Bytes, IBytes.
|
||||
//
|
||||
// ParseBytes("42 MB") -> 42000000, nil
|
||||
// ParseBytes("42 mib") -> 44040192, nil
|
||||
func ParseBytes(s string) (uint64, error) {
|
||||
lastDigit := 0
|
||||
hasComma := false
|
||||
for _, r := range s {
|
||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||
break
|
||||
}
|
||||
if r == ',' {
|
||||
hasComma = true
|
||||
}
|
||||
lastDigit++
|
||||
}
|
||||
|
||||
num := s[:lastDigit]
|
||||
if hasComma {
|
||||
num = strings.Replace(num, ",", "", -1)
|
||||
}
|
||||
|
||||
f, err := strconv.ParseFloat(num, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||
if m, ok := bytesSizeTable[extra]; ok {
|
||||
f *= float64(m)
|
||||
if f >= math.MaxUint64 {
|
||||
return 0, fmt.Errorf("too large: %v", s)
|
||||
}
|
||||
return uint64(f), nil
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("unhandled size name: %v", extra)
|
||||
}
|
||||
116
vendor/github.com/dustin/go-humanize/comma.go
generated
vendored
Normal file
116
vendor/github.com/dustin/go-humanize/comma.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Comma produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
	// Min int64 can't be negated to a usable value, so it has to be
	// special cased.
	if v == math.MinInt64 {
		return "-9,223,372,036,854,775,808"
	}

	sign := ""
	if v < 0 {
		sign = "-"
		v = -v
	}

	digits := strconv.FormatInt(v, 10)
	// Width of the leading group (1-3 digits).
	head := len(digits) % 3
	if head == 0 {
		head = 3
	}

	var b strings.Builder
	b.WriteString(sign)
	b.WriteString(digits[:head])
	for i := head; i < len(digits); i += 3 {
		b.WriteByte(',')
		b.WriteString(digits[i : i+3])
	}
	return b.String()
}
|
||||
|
||||
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
	var sb strings.Builder
	if v < 0 {
		sb.WriteByte('-')
		v = -v
	}

	text := strconv.FormatFloat(v, 'f', -1, 64)
	intPart, frac := text, ""
	if dot := strings.IndexByte(text, '.'); dot >= 0 {
		intPart, frac = text[:dot], text[dot+1:]
	}

	// Leading group of 1-2 digits, then full groups of three.
	head := len(intPart) % 3
	if head > 0 {
		sb.WriteString(intPart[:head])
		if head < len(intPart) {
			sb.WriteByte(',')
		}
	}
	for i := head; i < len(intPart); i += 3 {
		sb.WriteString(intPart[i : i+3])
		if i+3 < len(intPart) {
			sb.WriteByte(',')
		}
	}

	if frac != "" {
		sb.WriteByte('.')
		sb.WriteString(frac)
	}
	return sb.String()
}
|
||||
|
||||
// CommafWithDigits works like the Commaf but limits the resulting
|
||||
// string to the given number of decimal places.
|
||||
//
|
||||
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
|
||||
func CommafWithDigits(f float64, decimals int) string {
|
||||
return stripTrailingDigits(Commaf(f), decimals)
|
||||
}
|
||||
|
||||
// BigComma produces a string form of the given big.Int in base 10
// with commas after every three orders of magnitude.
//
// NOTE(review): b is modified in place (Abs and repeated DivMod), so
// the caller's value is consumed.
func BigComma(b *big.Int) string {
	sign := ""
	if b.Sign() < 0 {
		sign = "-"
		b.Abs(b)
	}

	athousand := big.NewInt(1000)
	// oom mutates its argument, so size the parts slice from a copy.
	c := (&big.Int{}).Set(b)
	_, m := oom(c, athousand)
	parts := make([]string, m+1)
	j := len(parts) - 1

	mod := &big.Int{}
	for b.Cmp(athousand) >= 0 {
		b.DivMod(b, athousand, mod)
		parts[j] = strconv.FormatInt(mod.Int64(), 10)
		// Zero-pad interior groups to three digits.
		switch len(parts[j]) {
		case 2:
			parts[j] = "0" + parts[j]
		case 1:
			parts[j] = "00" + parts[j]
		}
		j--
	}
	parts[j] = strconv.Itoa(int(b.Int64()))
	return sign + strings.Join(parts[j:], ",")
}
|
||||
41
vendor/github.com/dustin/go-humanize/commaf.go
generated
vendored
Normal file
41
vendor/github.com/dustin/go-humanize/commaf.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
//go:build go1.6
|
||||
// +build go1.6
|
||||
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BigCommaf produces a string form of the given big.Float in base 10
|
||||
// with commas after every three orders of magnitude.
|
||||
func BigCommaf(v *big.Float) string {
|
||||
buf := &bytes.Buffer{}
|
||||
if v.Sign() < 0 {
|
||||
buf.Write([]byte{'-'})
|
||||
v.Abs(v)
|
||||
}
|
||||
|
||||
comma := []byte{','}
|
||||
|
||||
parts := strings.Split(v.Text('f', -1), ".")
|
||||
pos := 0
|
||||
if len(parts[0])%3 != 0 {
|
||||
pos += len(parts[0]) % 3
|
||||
buf.WriteString(parts[0][:pos])
|
||||
buf.Write(comma)
|
||||
}
|
||||
for ; pos < len(parts[0]); pos += 3 {
|
||||
buf.WriteString(parts[0][pos : pos+3])
|
||||
buf.Write(comma)
|
||||
}
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
|
||||
if len(parts) > 1 {
|
||||
buf.Write([]byte{'.'})
|
||||
buf.WriteString(parts[1])
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
49
vendor/github.com/dustin/go-humanize/ftoa.go
generated
vendored
Normal file
49
vendor/github.com/dustin/go-humanize/ftoa.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// stripTrailingZeros removes trailing zeros after the decimal point,
// and the decimal point itself if nothing remains after it.
func stripTrailingZeros(s string) string {
	if !strings.ContainsRune(s, '.') {
		return s
	}
	trimmed := strings.TrimRight(s, "0")
	return strings.TrimSuffix(trimmed, ".")
}
|
||||
|
||||
// stripTrailingDigits truncates s to at most `digits` decimal places.
// With digits <= 0 the fractional part (and the dot) is removed; if s
// has no decimal point, or fewer decimals than requested, it is
// returned unchanged.
func stripTrailingDigits(s string, digits int) string {
	dot := strings.IndexByte(s, '.')
	if dot < 0 {
		return s
	}
	if digits <= 0 {
		return s[:dot]
	}
	end := dot + 1 + digits
	if end >= len(s) {
		return s
	}
	return s[:end]
}
|
||||
|
||||
// Ftoa converts a float to a string with no trailing zeros.
|
||||
func Ftoa(num float64) string {
|
||||
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
|
||||
}
|
||||
|
||||
// FtoaWithDigits converts a float to a string but limits the resulting string
|
||||
// to the given number of decimal places, and no trailing zeros.
|
||||
func FtoaWithDigits(num float64, digits int) string {
|
||||
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
|
||||
}
|
||||
8
vendor/github.com/dustin/go-humanize/humanize.go
generated
vendored
Normal file
8
vendor/github.com/dustin/go-humanize/humanize.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
/*
|
||||
Package humanize converts boring ugly numbers to human-friendly strings and back.
|
||||
|
||||
Durations can be turned into strings such as "3 days ago", numbers
|
||||
representing sizes like 82854982 into useful strings like, "83 MB" or
|
||||
"79 MiB" (whichever you prefer).
|
||||
*/
|
||||
package humanize
|
||||
192
vendor/github.com/dustin/go-humanize/number.go
generated
vendored
Normal file
192
vendor/github.com/dustin/go-humanize/number.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
package humanize
|
||||
|
||||
/*
|
||||
Slightly adapted from the source to fit go-humanize.
|
||||
|
||||
Author: https://github.com/gorhill
|
||||
Source: https://gist.github.com/gorhill/5285193
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var (
	// renderFloatPrecisionMultipliers[p] is 10^p, used to scale the
	// fractional part of a number to p decimal digits (p in 0..9).
	renderFloatPrecisionMultipliers = [...]float64{
		1,
		10,
		100,
		1000,
		10000,
		100000,
		1000000,
		10000000,
		100000000,
		1000000000,
	}

	// renderFloatPrecisionRounders[p] is 0.5 * 10^-p, added before the
	// integer/fraction split in FormatFloat so the value is rounded
	// (half-up) rather than truncated at p digits.
	renderFloatPrecisionRounders = [...]float64{
		0.5,
		0.05,
		0.005,
		0.0005,
		0.00005,
		0.000005,
		0.0000005,
		0.00000005,
		0.000000005,
		0.0000000005,
	}
)
|
||||
|
||||
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
// * thousands separator
// * decimal separator
// * decimal precision
//
// Usage: s := RenderFloat(format, n)
// The format parameter tells how to render the number n.
//
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
//
// Examples of format strings, given n = 12345.6789:
// "#,###.##" => "12,345.67"
// "#,###." => "12,345"
// "#,###" => "12345,678"
// "#\u202F###,##" => "12 345,68"
// "#.###,###### => 12.345,678900
// "" (aka default format) => 12,345.67
//
// The highest precision allowed is 9 digits after the decimal symbol.
// There is also a version for integer number, FormatInteger(),
// which is convenient for calls within template.
//
// Invalid format strings cause a panic (see the directive checks below).
func FormatFloat(format string, n float64) string {
	// Special cases:
	//   NaN  = "NaN"
	//   +Inf = "Infinity"  (note: no leading '+')
	//   -Inf = "-Infinity"
	if math.IsNaN(n) {
		return "NaN"
	}
	// Only +Inf compares greater than MaxFloat64, so this detects +Inf.
	if n > math.MaxFloat64 {
		return "Infinity"
	}
	// Symmetrically, only -Inf is below -MaxFloat64.
	if n < (0.0 - math.MaxFloat64) {
		return "-Infinity"
	}

	// Defaults used when format is the empty string.
	precision := 2
	decimalStr := "."
	thousandStr := ","
	positiveStr := ""
	negativeStr := "-"

	if len(format) > 0 {
		// Work on runes so multi-byte separators (e.g. \u202F) are handled.
		format := []rune(format)

		// If there is an explicit format directive,
		// then default values are these:
		precision = 9
		thousandStr = ""

		// Collect indices of meaningful formatting directives, i.e. every
		// rune that is not a digit placeholder ('#' or '0').
		formatIndx := []int{}
		for i, char := range format {
			if char != '#' && char != '0' {
				formatIndx = append(formatIndx, i)
			}
		}

		if len(formatIndx) > 0 {
			// Directive at index 0:
			//   Must be a '+'; panics otherwise.
			//   index: 0123456789
			//          +0.000,000
			//          +000,000.0
			//          +0000.00
			//          +0000
			if formatIndx[0] == 0 {
				if format[formatIndx[0]] != '+' {
					panic("RenderFloat(): invalid positive sign directive")
				}
				positiveStr = "+"
				formatIndx = formatIndx[1:]
			}

			// Two remaining directives:
			//   First is the thousands separator; it must be followed by
			//   exactly three digit placeholders (hence distance 4).
			//   0123456789
			//   0.000,000
			//   000,000.00
			if len(formatIndx) == 2 {
				if (formatIndx[1] - formatIndx[0]) != 4 {
					panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
				}
				thousandStr = string(format[formatIndx[0]])
				formatIndx = formatIndx[1:]
			}

			// One remaining directive:
			//   It is the decimal separator; the number of placeholders
			//   after it is the wanted precision.
			//   0123456789
			//   0.00
			//   000,0000
			if len(formatIndx) == 1 {
				decimalStr = string(format[formatIndx[0]])
				precision = len(format) - formatIndx[0] - 1
			}
		}
	}

	// Generate the sign part. Values within +/-1e-9 are treated as zero
	// (matching the maximum supported precision of 9 digits).
	var signStr string
	if n >= 0.000000001 {
		signStr = positiveStr
	} else if n <= -0.000000001 {
		signStr = negativeStr
		n = -n
	} else {
		signStr = ""
		n = 0.0
	}

	// Split number into integer and fractional parts, rounding half-up at
	// the chosen precision via the precomputed rounder table.
	intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])

	// generate integer part string
	intStr := strconv.FormatInt(int64(intf), 10)

	// Add the thousands separator, working right-to-left in steps of 3.
	if len(thousandStr) > 0 {
		for i := len(intStr); i > 3; {
			i -= 3
			intStr = intStr[:i] + thousandStr + intStr[i:]
		}
	}

	// No fractional part requested; we can leave now.
	if precision == 0 {
		return signStr + intStr
	}

	// Generate the fractional part by scaling it to an integer.
	fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
	// Left-pad with zeros up to the requested precision.
	if len(fracStr) < precision {
		fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
	}

	return signStr + intStr + decimalStr + fracStr
}
|
||||
|
||||
// FormatInteger produces a formatted number as string.
// See FormatFloat.
//
// Note: the int is converted through float64, so integers larger than
// 2^53 may lose precision in the output.
func FormatInteger(format string, n int) string {
	return FormatFloat(format, float64(n))
}
|
||||
25
vendor/github.com/dustin/go-humanize/ordinals.go
generated
vendored
Normal file
25
vendor/github.com/dustin/go-humanize/ordinals.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
package humanize
|
||||
|
||||
import "strconv"
|
||||
|
||||
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
	// Default suffix; overridden for 1st/2nd/3rd except the teens
	// (11th, 12th, 13th).
	suffix := "th"
	tens := x % 100
	switch x % 10 {
	case 1:
		if tens != 11 {
			suffix = "st"
		}
	case 2:
		if tens != 12 {
			suffix = "nd"
		}
	case 3:
		if tens != 13 {
			suffix = "rd"
		}
	}
	return strconv.Itoa(x) + suffix
}
|
||||
127
vendor/github.com/dustin/go-humanize/si.go
generated
vendored
Normal file
127
vendor/github.com/dustin/go-humanize/si.go
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// siPrefixTable maps a decimal exponent (a multiple of 3) to its SI
// prefix symbol. The key type is float64 so it can be indexed directly
// with the result of the floating-point exponent math in ComputeSI.
var siPrefixTable = map[float64]string{
	-30: "q", // quecto
	-27: "r", // ronto
	-24: "y", // yocto
	-21: "z", // zepto
	-18: "a", // atto
	-15: "f", // femto
	-12: "p", // pico
	-9:  "n", // nano
	-6:  "µ", // micro
	-3:  "m", // milli
	0:   "",
	3:   "k", // kilo
	6:   "M", // mega
	9:   "G", // giga
	12:  "T", // tera
	15:  "P", // peta
	18:  "E", // exa
	21:  "Z", // zetta
	24:  "Y", // yotta
	27:  "R", // ronna
	30:  "Q", // quetta
}

// revSIPrefixTable maps prefix symbol -> multiplier (10^exponent),
// used by ParseSI.
var revSIPrefixTable = revfmap(siPrefixTable)

// revfmap reverses the map and precomputes the power multiplier
// (10^k for each exponent key k).
func revfmap(in map[float64]string) map[string]float64 {
	rv := map[string]float64{}
	for k, v := range in {
		rv[v] = math.Pow(10, k)
	}
	return rv
}
|
||||
|
||||
// riParseRegex matches "<number><optional space><optional SI prefix><unit>";
// built in init from the prefix table.
var riParseRegex *regexp.Regexp

func init() {
	// Build a pattern of the shape ^([\-0-9.]+)\s?([<prefixes>]?)(.*).
	// Map iteration order is random, but the prefixes form a regexp
	// character class, so ordering does not affect matching.
	ri := `^([\-0-9.]+)\s?([`
	for _, v := range siPrefixTable {
		ri += v
	}
	ri += `]?)(.*)`

	riParseRegex = regexp.MustCompile(ri)
}
|
||||
|
||||
// ComputeSI finds the most appropriate SI prefix for the given number
// and returns the prefix along with the value adjusted to be within
// that prefix.
//
// See also: SI, ParseSI.
//
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
func ComputeSI(input float64) (float64, string) {
	if input == 0 {
		return 0, ""
	}
	mag := math.Abs(input)
	exponent := math.Floor(logn(mag, 10))
	// Snap the exponent down to a multiple of 3 (SI prefixes step by 10^3).
	exponent = math.Floor(exponent/3) * 3

	value := mag / math.Pow(10, exponent)

	// Handle special case where value is exactly 1000.0
	// Should return 1 M instead of 1000 k
	if value == 1000.0 {
		exponent += 3
		value = mag / math.Pow(10, exponent)
	}

	// Restore the original sign on the scaled magnitude.
	value = math.Copysign(value, input)

	// Exponents outside the table yield the empty prefix (zero value).
	prefix := siPrefixTable[exponent]
	return value, prefix
}
|
||||
|
||||
// SI returns a string with default formatting.
|
||||
//
|
||||
// SI uses Ftoa to format float value, removing trailing zeros.
|
||||
//
|
||||
// See also: ComputeSI, ParseSI.
|
||||
//
|
||||
// e.g. SI(1000000, "B") -> 1 MB
|
||||
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
|
||||
func SI(input float64, unit string) string {
|
||||
value, prefix := ComputeSI(input)
|
||||
return Ftoa(value) + " " + prefix + unit
|
||||
}
|
||||
|
||||
// SIWithDigits works like SI but limits the resulting string to the
|
||||
// given number of decimal places.
|
||||
//
|
||||
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
|
||||
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
|
||||
func SIWithDigits(input float64, decimals int, unit string) string {
|
||||
value, prefix := ComputeSI(input)
|
||||
return FtoaWithDigits(value, decimals) + " " + prefix + unit
|
||||
}
|
||||
|
||||
var errInvalid = errors.New("invalid input")
|
||||
|
||||
// ParseSI parses an SI string back into the number and unit.
|
||||
//
|
||||
// See also: SI, ComputeSI.
|
||||
//
|
||||
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
|
||||
func ParseSI(input string) (float64, string, error) {
|
||||
found := riParseRegex.FindStringSubmatch(input)
|
||||
if len(found) != 4 {
|
||||
return 0, "", errInvalid
|
||||
}
|
||||
mag := revSIPrefixTable[found[2]]
|
||||
unit := found[3]
|
||||
|
||||
base, err := strconv.ParseFloat(found[1], 64)
|
||||
return base * mag, unit, err
|
||||
}
|
||||
117
vendor/github.com/dustin/go-humanize/times.go
generated
vendored
Normal file
117
vendor/github.com/dustin/go-humanize/times.go
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Seconds-based time units used by the relative-time magnitude tables.
// Note these are approximations: Month is a fixed 30 days, so
// Year = 12 * Month is 360 days, not 365.
const (
	Day      = 24 * time.Hour
	Week     = 7 * Day
	Month    = 30 * Day
	Year     = 12 * Month
	LongTime = 37 * Year
)
|
||||
|
||||
// Time formats a time into a relative string.
|
||||
//
|
||||
// Time(someT) -> "3 weeks ago"
|
||||
func Time(then time.Time) string {
|
||||
return RelTime(then, time.Now(), "ago", "from now")
|
||||
}
|
||||
|
||||
// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
// DivBy should be time.Minute so whatever the duration is will be
// expressed in minutes.
type RelTimeMagnitude struct {
	D      time.Duration // exclusive upper bound for this magnitude
	Format string        // fmt-style format; may contain %d and %s
	DivBy  time.Duration // divisor applied to the duration for %d
}
|
||||
|
||||
// defaultMagnitudes is the breakpoint table used by RelTime. Entries
// must stay sorted by ascending D: CustomRelTime binary-searches it.
// The final math.MaxInt64 entry guarantees a match for any duration.
var defaultMagnitudes = []RelTimeMagnitude{
	{time.Second, "now", time.Second},
	{2 * time.Second, "1 second %s", 1},
	{time.Minute, "%d seconds %s", time.Second},
	{2 * time.Minute, "1 minute %s", 1},
	{time.Hour, "%d minutes %s", time.Minute},
	{2 * time.Hour, "1 hour %s", 1},
	{Day, "%d hours %s", time.Hour},
	{2 * Day, "1 day %s", 1},
	{Week, "%d days %s", Day},
	{2 * Week, "1 week %s", 1},
	{Month, "%d weeks %s", Week},
	{2 * Month, "1 month %s", 1},
	{Year, "%d months %s", Month},
	{18 * Month, "1 year %s", 1},
	{2 * Year, "2 years %s", 1},
	{LongTime, "%d years %s", Year},
	{math.MaxInt64, "a long while %s", 1},
}
|
||||
|
||||
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the labels are used applied so that
// the label corresponding to the smaller time is applied.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
	// Delegate to CustomRelTime with the package's default breakpoints.
	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
|
||||
|
||||
// CustomRelTime formats a time into a relative string.
|
||||
//
|
||||
// It takes two times two labels and a table of relative time formats.
|
||||
// In addition to the generic time delta string (e.g. 5 minutes), the
|
||||
// labels are used applied so that the label corresponding to the
|
||||
// smaller time is applied.
|
||||
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
|
||||
lbl := albl
|
||||
diff := b.Sub(a)
|
||||
|
||||
if a.After(b) {
|
||||
lbl = blbl
|
||||
diff = a.Sub(b)
|
||||
}
|
||||
|
||||
n := sort.Search(len(magnitudes), func(i int) bool {
|
||||
return magnitudes[i].D > diff
|
||||
})
|
||||
|
||||
if n >= len(magnitudes) {
|
||||
n = len(magnitudes) - 1
|
||||
}
|
||||
mag := magnitudes[n]
|
||||
args := []interface{}{}
|
||||
escaped := false
|
||||
for _, ch := range mag.Format {
|
||||
if escaped {
|
||||
switch ch {
|
||||
case 's':
|
||||
args = append(args, lbl)
|
||||
case 'd':
|
||||
args = append(args, diff/mag.DivBy)
|
||||
}
|
||||
escaped = false
|
||||
} else {
|
||||
escaped = ch == '%'
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf(mag.Format, args...)
|
||||
}
|
||||
41
vendor/github.com/google/uuid/CHANGELOG.md
generated
vendored
Normal file
41
vendor/github.com/google/uuid/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
# Changelog
|
||||
|
||||
## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
|
||||
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
|
||||
|
||||
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
|
||||
|
||||
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
|
||||
|
||||
### Fixes
|
||||
|
||||
* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
|
||||
|
||||
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
|
||||
|
||||
## Changelog
|
||||
26
vendor/github.com/google/uuid/CONTRIBUTING.md
generated
vendored
Normal file
26
vendor/github.com/google/uuid/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
# How to contribute
|
||||
|
||||
We definitely welcome patches and contribution to this project!
|
||||
|
||||
### Tips
|
||||
|
||||
Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
|
||||
|
||||
Always try to include a test case! If it is not possible or not necessary,
|
||||
please explain why in the pull request description.
|
||||
|
||||
### Releasing
|
||||
|
||||
Commits that would precipitate a SemVer change, as described in the Conventional
|
||||
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
|
||||
to create a release candidate pull request. Once submitted, `release-please`
|
||||
will create a release.
|
||||
|
||||
For tips on how to work with `release-please`, see its documentation.
|
||||
|
||||
### Legal requirements
|
||||
|
||||
In order to protect both you and ourselves, you will need to sign the
|
||||
[Contributor License Agreement](https://cla.developers.google.com/clas).
|
||||
|
||||
You may have already signed it for other Google projects.
|
||||
9
vendor/github.com/google/uuid/CONTRIBUTORS
generated
vendored
Normal file
9
vendor/github.com/google/uuid/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
Paul Borman <borman@google.com>
|
||||
bmatsuo
|
||||
shawnps
|
||||
theory
|
||||
jboverfelt
|
||||
dsymonds
|
||||
cd1
|
||||
wallclockbuilder
|
||||
dansouza
|
||||
27
vendor/github.com/google/uuid/LICENSE
generated
vendored
Normal file
27
vendor/github.com/google/uuid/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009,2014 Google Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
21
vendor/github.com/google/uuid/README.md
generated
vendored
Normal file
21
vendor/github.com/google/uuid/README.md
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# uuid
|
||||
The uuid package generates and inspects UUIDs based on
|
||||
[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
|
||||
and DCE 1.1: Authentication and Security Services.
|
||||
|
||||
This package is based on the github.com/pborman/uuid package (previously named
|
||||
code.google.com/p/go-uuid). It differs from these earlier packages in that
|
||||
a UUID is a 16 byte array rather than a byte slice. One loss due to this
|
||||
change is the ability to represent an invalid UUID (vs a NIL UUID).
|
||||
|
||||
###### Install
|
||||
```sh
|
||||
go get github.com/google/uuid
|
||||
```
|
||||
|
||||
###### Documentation
|
||||
[](https://pkg.go.dev/github.com/google/uuid)
|
||||
|
||||
Full `go doc` style documentation for the package can be viewed online without
|
||||
installing this package by using the GoDoc site here:
|
||||
http://pkg.go.dev/github.com/google/uuid
|
||||
80
vendor/github.com/google/uuid/dce.go
generated
vendored
Normal file
80
vendor/github.com/google/uuid/dce.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A Domain represents a Version 2 domain
type Domain byte

// Domain constants for DCE Security (Version 2) UUIDs.
const (
	Person = Domain(0)
	Group  = Domain(1)
	Org    = Domain(2)
)
|
||||
|
||||
// NewDCESecurity returns a DCE Security (Version 2) UUID.
|
||||
//
|
||||
// The domain should be one of Person, Group or Org.
|
||||
// On a POSIX system the id should be the users UID for the Person
|
||||
// domain and the users GID for the Group. The meaning of id for
|
||||
// the domain Org or on non-POSIX systems is site defined.
|
||||
//
|
||||
// For a given domain/id pair the same token may be returned for up to
|
||||
// 7 minutes and 10 seconds.
|
||||
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
|
||||
uuid, err := NewUUID()
|
||||
if err == nil {
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
|
||||
uuid[9] = byte(domain)
|
||||
binary.BigEndian.PutUint32(uuid[0:], id)
|
||||
}
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
//	NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
	return NewDCESecurity(Person, uint32(os.Getuid()))
}
|
||||
|
||||
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
//	NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
	return NewDCESecurity(Group, uint32(os.Getgid()))
}
|
||||
|
||||
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
	// Version 2 UUIDs store the domain in byte 9 (see NewDCESecurity).
	return Domain(uuid[9])
}
|
||||
|
||||
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
	// Version 2 UUIDs store the 32-bit id big-endian in the first four
	// bytes (see NewDCESecurity).
	return binary.BigEndian.Uint32(uuid[0:4])
}
|
||||
|
||||
func (d Domain) String() string {
|
||||
switch d {
|
||||
case Person:
|
||||
return "Person"
|
||||
case Group:
|
||||
return "Group"
|
||||
case Org:
|
||||
return "Org"
|
||||
}
|
||||
return fmt.Sprintf("Domain%d", int(d))
|
||||
}
|
||||
12
vendor/github.com/google/uuid/doc.go
generated
vendored
Normal file
12
vendor/github.com/google/uuid/doc.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package uuid generates and inspects UUIDs.
|
||||
//
|
||||
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
|
||||
// Services.
|
||||
//
|
||||
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
|
||||
// maps or compared directly.
|
||||
package uuid
|
||||
59
vendor/github.com/google/uuid/hash.go
generated
vendored
Normal file
59
vendor/github.com/google/uuid/hash.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// Well known namespace IDs and UUIDs used with the name-based
// generators NewMD5 and NewSHA1.
var (
	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
	Nil           UUID // empty UUID, all zeros

	// The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
	Max = UUID{
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	}
)
|
||||
|
||||
// NewHash returns a new UUID derived from the hash of space concatenated with
|
||||
// data generated by h. The hash should be at least 16 byte in length. The
|
||||
// first 16 bytes of the hash are used to form the UUID. The version of the
|
||||
// UUID will be the lower 4 bits of version. NewHash is used to implement
|
||||
// NewMD5 and NewSHA1.
|
||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
|
||||
h.Reset()
|
||||
h.Write(space[:]) //nolint:errcheck
|
||||
h.Write(data) //nolint:errcheck
|
||||
s := h.Sum(nil)
|
||||
var uuid UUID
|
||||
copy(uuid[:], s)
|
||||
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
|
||||
return uuid
|
||||
}
|
||||
|
||||
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
//	NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
	return NewHash(md5.New(), space, data, 3)
}
|
||||
|
||||
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
//	NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
	return NewHash(sha1.New(), space, data, 5)
}
|
||||
38
vendor/github.com/google/uuid/marshal.go
generated
vendored
Normal file
38
vendor/github.com/google/uuid/marshal.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "fmt"
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (uuid UUID) MarshalText() ([]byte, error) {
|
||||
var js [36]byte
|
||||
encodeHex(js[:], uuid)
|
||||
return js[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (uuid *UUID) UnmarshalText(data []byte) error {
|
||||
id, err := ParseBytes(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*uuid = id
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
// The returned 16-byte slice is backed by the value receiver's copy of
// the UUID, so callers may retain it.
func (uuid UUID) MarshalBinary() ([]byte, error) {
	return uuid[:], nil
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (uuid *UUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(uuid[:], data)
|
||||
return nil
|
||||
}
|
||||
90
vendor/github.com/google/uuid/node.go
generated
vendored
Normal file
90
vendor/github.com/google/uuid/node.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
	nodeMu sync.Mutex // guards ifname and nodeID
	ifname string     // name of interface being used ("user"/"random" are sentinels)
	nodeID [6]byte    // hardware address for version 1 UUIDs
	zeroID [6]byte    // all-zero nodeID, used to detect "not yet set"
)
|
||||
|
||||
// NodeInterface returns the name of the interface from which the NodeID was
|
||||
// derived. The interface "user" is returned if the NodeID was set by
|
||||
// SetNodeID.
|
||||
func NodeInterface() string {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
return ifname
|
||||
}
|
||||
|
||||
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
|
||||
// If name is "" then the first usable interface found will be used or a random
|
||||
// Node ID will be generated. If a named interface cannot be found then false
|
||||
// is returned.
|
||||
//
|
||||
// SetNodeInterface never fails when name is "".
|
||||
func SetNodeInterface(name string) bool {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
return setNodeInterface(name)
|
||||
}
|
||||
|
||||
// setNodeInterface implements SetNodeInterface; callers must hold nodeMu
// (both SetNodeInterface and NodeID lock before calling it).
func setNodeInterface(name string) bool {
	iname, addr := getHardwareInterface(name) // null implementation for js
	if iname != "" && addr != nil {
		ifname = iname
		copy(nodeID[:], addr)
		return true
	}

	// We found no interfaces with a valid hardware address.  If name
	// does not specify a specific interface generate a random Node ID
	// (section 4.1.6)
	if name == "" {
		ifname = "random"
		randomBits(nodeID[:])
		return true
	}
	// A specific interface was requested but not found.
	return false
}
|
||||
|
||||
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
|
||||
// if not already set.
|
||||
func NodeID() []byte {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
if nodeID == zeroID {
|
||||
setNodeInterface("")
|
||||
}
|
||||
nid := nodeID
|
||||
return nid[:]
|
||||
}
|
||||
|
||||
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
|
||||
// of id are used. If id is less than 6 bytes then false is returned and the
|
||||
// Node ID is not set.
|
||||
func SetNodeID(id []byte) bool {
|
||||
if len(id) < 6 {
|
||||
return false
|
||||
}
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
copy(nodeID[:], id)
|
||||
ifname = "user"
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
|
||||
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) NodeID() []byte {
|
||||
var node [6]byte
|
||||
copy(node[:], uuid[10:])
|
||||
return node[:]
|
||||
}
|
||||
12
vendor/github.com/google/uuid/node_js.go
generated
vendored
Normal file
12
vendor/github.com/google/uuid/node_js.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2017 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build js
|
||||
|
||||
package uuid
|
||||
|
||||
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
// With no interface available, setNodeInterface falls back to a random Node ID.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
|
||||
33
vendor/github.com/google/uuid/node_net.go
generated
vendored
Normal file
33
vendor/github.com/google/uuid/node_net.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright 2017 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !js
|
||||
|
||||
package uuid
|
||||
|
||||
import "net"
|
||||
|
||||
var interfaces []net.Interface // cached list of interfaces
|
||||
|
||||
// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
	if interfaces == nil {
		// Cache the system interface list on first use.
		// NOTE(review): the cache itself is unguarded here; the visible
		// caller (setNodeInterface) runs under nodeMu — confirm before
		// calling this from any other path.
		var err error
		interfaces, err = net.Interfaces()
		if err != nil {
			return "", nil
		}
	}
	for _, ifs := range interfaces {
		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
			return ifs.Name, ifs.HardwareAddr
		}
	}
	return "", nil
}
|
||||
118
vendor/github.com/google/uuid/null.go
generated
vendored
Normal file
118
vendor/github.com/google/uuid/null.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
// Copyright 2021 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var jsonNull = []byte("null")
|
||||
|
||||
// NullUUID represents a UUID that may be null.
|
||||
// NullUUID implements the SQL driver.Scanner interface so
|
||||
// it can be used as a scan destination:
|
||||
//
|
||||
// var u uuid.NullUUID
|
||||
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
|
||||
// ...
|
||||
// if u.Valid {
|
||||
// // use u.UUID
|
||||
// } else {
|
||||
// // NULL value
|
||||
// }
|
||||
//
|
||||
type NullUUID struct {
|
||||
UUID UUID
|
||||
Valid bool // Valid is true if UUID is not NULL
|
||||
}
|
||||
|
||||
// Scan implements the SQL driver.Scanner interface.
|
||||
func (nu *NullUUID) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
nu.UUID, nu.Valid = Nil, false
|
||||
return nil
|
||||
}
|
||||
|
||||
err := nu.UUID.Scan(value)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements the driver Valuer interface.
|
||||
func (nu NullUUID) Value() (driver.Value, error) {
|
||||
if !nu.Valid {
|
||||
return nil, nil
|
||||
}
|
||||
// Delegate to UUID Value function
|
||||
return nu.UUID.Value()
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (nu NullUUID) MarshalBinary() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID[:], nil
|
||||
}
|
||||
|
||||
return []byte(nil), nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(nu.UUID[:], data)
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (nu NullUUID) MarshalText() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return nu.UUID.MarshalText()
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (nu *NullUUID) UnmarshalText(data []byte) error {
|
||||
id, err := ParseBytes(data)
|
||||
if err != nil {
|
||||
nu.Valid = false
|
||||
return err
|
||||
}
|
||||
nu.UUID = id
|
||||
nu.Valid = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (nu NullUUID) MarshalJSON() ([]byte, error) {
|
||||
if nu.Valid {
|
||||
return json.Marshal(nu.UUID)
|
||||
}
|
||||
|
||||
return jsonNull, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler.
|
||||
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
|
||||
if bytes.Equal(data, jsonNull) {
|
||||
*nu = NullUUID{}
|
||||
return nil // valid null UUID
|
||||
}
|
||||
err := json.Unmarshal(data, &nu.UUID)
|
||||
nu.Valid = err == nil
|
||||
return err
|
||||
}
|
||||
59
vendor/github.com/google/uuid/sql.go
generated
vendored
Normal file
59
vendor/github.com/google/uuid/sql.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
|
||||
// Currently, database types that map to string and []byte are supported. Please
|
||||
// consult database-specific driver documentation for matching types.
|
||||
func (uuid *UUID) Scan(src interface{}) error {
|
||||
switch src := src.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
|
||||
case string:
|
||||
// if an empty UUID comes from a table, we return a null UUID
|
||||
if src == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// see Parse for required string format
|
||||
u, err := Parse(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Scan: %v", err)
|
||||
}
|
||||
|
||||
*uuid = u
|
||||
|
||||
case []byte:
|
||||
// if an empty UUID comes from a table, we return a null UUID
|
||||
if len(src) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// assumes a simple slice of bytes if 16 bytes
|
||||
// otherwise attempts to parse
|
||||
if len(src) != 16 {
|
||||
return uuid.Scan(string(src))
|
||||
}
|
||||
copy((*uuid)[:], src)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements sql.Valuer so that UUIDs can be written to databases
|
||||
// transparently. Currently, UUIDs map to strings. Please consult
|
||||
// database-specific driver documentation for matching types.
|
||||
func (uuid UUID) Value() (driver.Value, error) {
|
||||
return uuid.String(), nil
|
||||
}
|
||||
134
vendor/github.com/google/uuid/time.go
generated
vendored
Normal file
134
vendor/github.com/google/uuid/time.go
generated
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
|
||||
// 1582.
|
||||
type Time int64
|
||||
|
||||
const (
|
||||
lillian = 2299160 // Julian day of 15 Oct 1582
|
||||
unix = 2440587 // Julian day of 1 Jan 1970
|
||||
epoch = unix - lillian // Days between epochs
|
||||
g1582 = epoch * 86400 // seconds between epochs
|
||||
g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
|
||||
)
|
||||
|
||||
var (
|
||||
timeMu sync.Mutex
|
||||
lasttime uint64 // last time we returned
|
||||
clockSeq uint16 // clock sequence for this run
|
||||
|
||||
timeNow = time.Now // for testing
|
||||
)
|
||||
|
||||
// UnixTime converts t the number of seconds and nanoseconds using the Unix
|
||||
// epoch of 1 Jan 1970.
|
||||
func (t Time) UnixTime() (sec, nsec int64) {
|
||||
sec = int64(t - g1582ns100)
|
||||
nsec = (sec % 10000000) * 100
|
||||
sec /= 10000000
|
||||
return sec, nsec
|
||||
}
|
||||
|
||||
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
|
||||
// clock sequence as well as adjusting the clock sequence as needed. An error
|
||||
// is returned if the current time cannot be determined.
|
||||
func GetTime() (Time, uint16, error) {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
return getTime()
|
||||
}
|
||||
|
||||
func getTime() (Time, uint16, error) {
|
||||
t := timeNow()
|
||||
|
||||
// If we don't have a clock sequence already, set one.
|
||||
if clockSeq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
now := uint64(t.UnixNano()/100) + g1582ns100
|
||||
|
||||
// If time has gone backwards with this clock sequence then we
|
||||
// increment the clock sequence
|
||||
if now <= lasttime {
|
||||
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
|
||||
}
|
||||
lasttime = now
|
||||
return Time(now), clockSeq, nil
|
||||
}
|
||||
|
||||
// ClockSequence returns the current clock sequence, generating one if not
|
||||
// already set. The clock sequence is only used for Version 1 UUIDs.
|
||||
//
|
||||
// The uuid package does not use global static storage for the clock sequence or
|
||||
// the last time a UUID was generated. Unless SetClockSequence is used, a new
|
||||
// random clock sequence is generated the first time a clock sequence is
|
||||
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
|
||||
func ClockSequence() int {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
return clockSequence()
|
||||
}
|
||||
|
||||
func clockSequence() int {
|
||||
if clockSeq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
return int(clockSeq & 0x3fff)
|
||||
}
|
||||
|
||||
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
|
||||
// -1 causes a new sequence to be generated.
|
||||
func SetClockSequence(seq int) {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
setClockSequence(seq)
|
||||
}
|
||||
|
||||
func setClockSequence(seq int) {
|
||||
if seq == -1 {
|
||||
var b [2]byte
|
||||
randomBits(b[:]) // clock sequence
|
||||
seq = int(b[0])<<8 | int(b[1])
|
||||
}
|
||||
oldSeq := clockSeq
|
||||
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
|
||||
if oldSeq != clockSeq {
|
||||
lasttime = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
|
||||
// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
|
||||
func (uuid UUID) Time() Time {
|
||||
var t Time
|
||||
switch uuid.Version() {
|
||||
case 6:
|
||||
time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
|
||||
t = Time(time)
|
||||
case 7:
|
||||
time := binary.BigEndian.Uint64(uuid[:8])
|
||||
t = Time((time>>16)*10000 + g1582ns100)
|
||||
default: // forward compatible
|
||||
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
|
||||
t = Time(time)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ClockSequence returns the clock sequence encoded in uuid.
|
||||
// The clock sequence is only well defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) ClockSequence() int {
|
||||
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
|
||||
}
|
||||
43
vendor/github.com/google/uuid/util.go
generated
vendored
Normal file
43
vendor/github.com/google/uuid/util.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// randomBits completely fills slice b with random data.
|
||||
func randomBits(b []byte) {
|
||||
if _, err := io.ReadFull(rander, b); err != nil {
|
||||
panic(err.Error()) // rand should never fail
|
||||
}
|
||||
}
|
||||
|
||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
||||
var xvalues = [256]byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
|
||||
|
||||
// xtob converts hex characters x1 and x2 into a byte.
|
||||
func xtob(x1, x2 byte) (byte, bool) {
|
||||
b1 := xvalues[x1]
|
||||
b2 := xvalues[x2]
|
||||
return (b1 << 4) | b2, b1 != 255 && b2 != 255
|
||||
}
|
||||
365
vendor/github.com/google/uuid/uuid.go
generated
vendored
Normal file
365
vendor/github.com/google/uuid/uuid.go
generated
vendored
Normal file
@@ -0,0 +1,365 @@
|
||||
// Copyright 2018 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
|
||||
// 4122.
|
||||
type UUID [16]byte
|
||||
|
||||
// A Version represents a UUID's version.
|
||||
type Version byte
|
||||
|
||||
// A Variant represents a UUID's variant.
|
||||
type Variant byte
|
||||
|
||||
// Constants returned by Variant.
|
||||
const (
|
||||
Invalid = Variant(iota) // Invalid UUID
|
||||
RFC4122 // The variant specified in RFC4122
|
||||
Reserved // Reserved, NCS backward compatibility.
|
||||
Microsoft // Reserved, Microsoft Corporation backward compatibility.
|
||||
Future // Reserved for future definition.
|
||||
)
|
||||
|
||||
const randPoolSize = 16 * 16
|
||||
|
||||
var (
|
||||
rander = rand.Reader // random function
|
||||
poolEnabled = false
|
||||
poolMu sync.Mutex
|
||||
poolPos = randPoolSize // protected with poolMu
|
||||
pool [randPoolSize]byte // protected with poolMu
|
||||
)
|
||||
|
||||
type invalidLengthError struct{ len int }
|
||||
|
||||
func (err invalidLengthError) Error() string {
|
||||
return fmt.Sprintf("invalid UUID length: %d", err.len)
|
||||
}
|
||||
|
||||
// IsInvalidLengthError is matcher function for custom error invalidLengthError
|
||||
func IsInvalidLengthError(err error) bool {
|
||||
_, ok := err.(invalidLengthError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
|
||||
// the standard UUID forms defined in RFC 4122
|
||||
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
|
||||
// Parse accepts non-standard strings such as the raw hex encoding
|
||||
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
|
||||
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
|
||||
// examined in the latter case. Parse should not be used to validate strings as
|
||||
// it parses non-standard encodings as indicated above.
|
||||
func Parse(s string) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(s) {
|
||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36:
|
||||
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9:
|
||||
if !strings.EqualFold(s[:9], "urn:uuid:") {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
|
||||
}
|
||||
s = s[9:]
|
||||
|
||||
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
case 36 + 2:
|
||||
s = s[1:]
|
||||
|
||||
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
case 32:
|
||||
var ok bool
|
||||
for i := range uuid {
|
||||
uuid[i], ok = xtob(s[i*2], s[i*2+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, invalidLengthError{len(s)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
for i, x := range [16]int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34,
|
||||
} {
|
||||
v, ok := xtob(s[x], s[x+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
uuid[i] = v
|
||||
}
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
|
||||
func ParseBytes(b []byte) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(b) {
|
||||
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
|
||||
}
|
||||
b = b[9:]
|
||||
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
b = b[1:]
|
||||
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
var ok bool
|
||||
for i := 0; i < 32; i += 2 {
|
||||
uuid[i/2], ok = xtob(b[i], b[i+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, invalidLengthError{len(b)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
for i, x := range [16]int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34,
|
||||
} {
|
||||
v, ok := xtob(b[x], b[x+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
uuid[i] = v
|
||||
}
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
// MustParse is like Parse but panics if the string cannot be parsed.
|
||||
// It simplifies safe initialization of global variables holding compiled UUIDs.
|
||||
func MustParse(s string) UUID {
|
||||
uuid, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(`uuid: Parse(` + s + `): ` + err.Error())
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
|
||||
// does not have a length of 16. The bytes are copied from the slice.
|
||||
func FromBytes(b []byte) (uuid UUID, err error) {
|
||||
err = uuid.UnmarshalBinary(b)
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
// Must returns uuid if err is nil and panics otherwise.
|
||||
func Must(uuid UUID, err error) UUID {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
|
||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
// It returns an error if the format is invalid, otherwise nil.
|
||||
func Validate(s string) error {
|
||||
switch len(s) {
|
||||
// Standard UUID format
|
||||
case 36:
|
||||
|
||||
// UUID with "urn:uuid:" prefix
|
||||
case 36 + 9:
|
||||
if !strings.EqualFold(s[:9], "urn:uuid:") {
|
||||
return fmt.Errorf("invalid urn prefix: %q", s[:9])
|
||||
}
|
||||
s = s[9:]
|
||||
|
||||
// UUID enclosed in braces
|
||||
case 36 + 2:
|
||||
if s[0] != '{' || s[len(s)-1] != '}' {
|
||||
return fmt.Errorf("invalid bracketed UUID format")
|
||||
}
|
||||
s = s[1 : len(s)-1]
|
||||
|
||||
// UUID without hyphens
|
||||
case 32:
|
||||
for i := 0; i < len(s); i += 2 {
|
||||
_, ok := xtob(s[i], s[i+1])
|
||||
if !ok {
|
||||
return errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return invalidLengthError{len(s)}
|
||||
}
|
||||
|
||||
// Check for standard UUID format
|
||||
if len(s) == 36 {
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return errors.New("invalid UUID format")
|
||||
}
|
||||
for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
|
||||
if _, ok := xtob(s[x], s[x+1]); !ok {
|
||||
return errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
// , or "" if uuid is invalid.
|
||||
func (uuid UUID) String() string {
|
||||
var buf [36]byte
|
||||
encodeHex(buf[:], uuid)
|
||||
return string(buf[:])
|
||||
}
|
||||
|
||||
// URN returns the RFC 2141 URN form of uuid,
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
|
||||
func (uuid UUID) URN() string {
|
||||
var buf [36 + 9]byte
|
||||
copy(buf[:], "urn:uuid:")
|
||||
encodeHex(buf[9:], uuid)
|
||||
return string(buf[:])
|
||||
}
|
||||
|
||||
func encodeHex(dst []byte, uuid UUID) {
|
||||
hex.Encode(dst, uuid[:4])
|
||||
dst[8] = '-'
|
||||
hex.Encode(dst[9:13], uuid[4:6])
|
||||
dst[13] = '-'
|
||||
hex.Encode(dst[14:18], uuid[6:8])
|
||||
dst[18] = '-'
|
||||
hex.Encode(dst[19:23], uuid[8:10])
|
||||
dst[23] = '-'
|
||||
hex.Encode(dst[24:], uuid[10:])
|
||||
}
|
||||
|
||||
// Variant returns the variant encoded in uuid.
|
||||
func (uuid UUID) Variant() Variant {
|
||||
switch {
|
||||
case (uuid[8] & 0xc0) == 0x80:
|
||||
return RFC4122
|
||||
case (uuid[8] & 0xe0) == 0xc0:
|
||||
return Microsoft
|
||||
case (uuid[8] & 0xe0) == 0xe0:
|
||||
return Future
|
||||
default:
|
||||
return Reserved
|
||||
}
|
||||
}
|
||||
|
||||
// Version returns the version of uuid.
|
||||
func (uuid UUID) Version() Version {
|
||||
return Version(uuid[6] >> 4)
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
if v > 15 {
|
||||
return fmt.Sprintf("BAD_VERSION_%d", v)
|
||||
}
|
||||
return fmt.Sprintf("VERSION_%d", v)
|
||||
}
|
||||
|
||||
func (v Variant) String() string {
|
||||
switch v {
|
||||
case RFC4122:
|
||||
return "RFC4122"
|
||||
case Reserved:
|
||||
return "Reserved"
|
||||
case Microsoft:
|
||||
return "Microsoft"
|
||||
case Future:
|
||||
return "Future"
|
||||
case Invalid:
|
||||
return "Invalid"
|
||||
}
|
||||
return fmt.Sprintf("BadVariant%d", int(v))
|
||||
}
|
||||
|
||||
// SetRand sets the random number generator to r, which implements io.Reader.
|
||||
// If r.Read returns an error when the package requests random data then
|
||||
// a panic will be issued.
|
||||
//
|
||||
// Calling SetRand with nil sets the random number generator to the default
|
||||
// generator.
|
||||
func SetRand(r io.Reader) {
|
||||
if r == nil {
|
||||
rander = rand.Reader
|
||||
return
|
||||
}
|
||||
rander = r
|
||||
}
|
||||
|
||||
// EnableRandPool enables internal randomness pool used for Random
|
||||
// (Version 4) UUID generation. The pool contains random bytes read from
|
||||
// the random number generator on demand in batches. Enabling the pool
|
||||
// may improve the UUID generation throughput significantly.
|
||||
//
|
||||
// Since the pool is stored on the Go heap, this feature may be a bad fit
|
||||
// for security sensitive applications.
|
||||
//
|
||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
||||
// only be called when there is no possibility that New or any other
|
||||
// UUID Version 4 generation function will be called concurrently.
|
||||
func EnableRandPool() {
|
||||
poolEnabled = true
|
||||
}
|
||||
|
||||
// DisableRandPool disables the randomness pool if it was previously
|
||||
// enabled with EnableRandPool.
|
||||
//
|
||||
// Both EnableRandPool and DisableRandPool are not thread-safe and should
|
||||
// only be called when there is no possibility that New or any other
|
||||
// UUID Version 4 generation function will be called concurrently.
|
||||
func DisableRandPool() {
|
||||
poolEnabled = false
|
||||
defer poolMu.Unlock()
|
||||
poolMu.Lock()
|
||||
poolPos = randPoolSize
|
||||
}
|
||||
|
||||
// UUIDs is a slice of UUID types.
|
||||
type UUIDs []UUID
|
||||
|
||||
// Strings returns a string slice containing the string form of each UUID in uuids.
|
||||
func (uuids UUIDs) Strings() []string {
|
||||
var uuidStrs = make([]string, len(uuids))
|
||||
for i, uuid := range uuids {
|
||||
uuidStrs[i] = uuid.String()
|
||||
}
|
||||
return uuidStrs
|
||||
}
|
||||
44
vendor/github.com/google/uuid/version1.go
generated
vendored
Normal file
44
vendor/github.com/google/uuid/version1.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
|
||||
// sequence, and the current time. If the NodeID has not been set by SetNodeID
|
||||
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
|
||||
// be set NewUUID returns nil. If clock sequence has not been set by
|
||||
// SetClockSequence then it will be set automatically. If GetTime fails to
|
||||
// return the current NewUUID returns nil and an error.
|
||||
//
|
||||
// In most cases, New should be used.
|
||||
func NewUUID() (UUID, error) {
|
||||
var uuid UUID
|
||||
now, seq, err := GetTime()
|
||||
if err != nil {
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
timeLow := uint32(now & 0xffffffff)
|
||||
timeMid := uint16((now >> 32) & 0xffff)
|
||||
timeHi := uint16((now >> 48) & 0x0fff)
|
||||
timeHi |= 0x1000 // Version 1
|
||||
|
||||
binary.BigEndian.PutUint32(uuid[0:], timeLow)
|
||||
binary.BigEndian.PutUint16(uuid[4:], timeMid)
|
||||
binary.BigEndian.PutUint16(uuid[6:], timeHi)
|
||||
binary.BigEndian.PutUint16(uuid[8:], seq)
|
||||
|
||||
nodeMu.Lock()
|
||||
if nodeID == zeroID {
|
||||
setNodeInterface("")
|
||||
}
|
||||
copy(uuid[10:], nodeID[:])
|
||||
nodeMu.Unlock()
|
||||
|
||||
return uuid, nil
|
||||
}
|
||||
76
vendor/github.com/google/uuid/version4.go
generated
vendored
Normal file
76
vendor/github.com/google/uuid/version4.go
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "io"
|
||||
|
||||
// New creates a new random UUID or panics. New is equivalent to
|
||||
// the expression
|
||||
//
|
||||
// uuid.Must(uuid.NewRandom())
|
||||
func New() UUID {
|
||||
return Must(NewRandom())
|
||||
}
|
||||
|
||||
// NewString creates a new random UUID and returns it as a string or panics.
|
||||
// NewString is equivalent to the expression
|
||||
//
|
||||
// uuid.New().String()
|
||||
func NewString() string {
|
||||
return Must(NewRandom()).String()
|
||||
}
|
||||
|
||||
// NewRandom returns a Random (Version 4) UUID.
|
||||
//
|
||||
// The strength of the UUIDs is based on the strength of the crypto/rand
|
||||
// package.
|
||||
//
|
||||
// Uses the randomness pool if it was enabled with EnableRandPool.
|
||||
//
|
||||
// A note about uniqueness derived from the UUID Wikipedia entry:
|
||||
//
|
||||
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
|
||||
// hit by a meteorite is estimated to be one chance in 17 billion, that
|
||||
// means the probability is about 0.00000000006 (6 × 10−11),
|
||||
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
|
||||
// year and having one duplicate.
|
||||
func NewRandom() (UUID, error) {
|
||||
if !poolEnabled {
|
||||
return NewRandomFromReader(rander)
|
||||
}
|
||||
return newRandomFromPool()
|
||||
}
|
||||
|
||||
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
|
||||
func NewRandomFromReader(r io.Reader) (UUID, error) {
|
||||
var uuid UUID
|
||||
_, err := io.ReadFull(r, uuid[:])
|
||||
if err != nil {
|
||||
return Nil, err
|
||||
}
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
func newRandomFromPool() (UUID, error) {
|
||||
var uuid UUID
|
||||
poolMu.Lock()
|
||||
if poolPos == randPoolSize {
|
||||
_, err := io.ReadFull(rander, pool[:])
|
||||
if err != nil {
|
||||
poolMu.Unlock()
|
||||
return Nil, err
|
||||
}
|
||||
poolPos = 0
|
||||
}
|
||||
copy(uuid[:], pool[poolPos:(poolPos+16)])
|
||||
poolPos += 16
|
||||
poolMu.Unlock()
|
||||
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
||||
return uuid, nil
|
||||
}
|
||||
56
vendor/github.com/google/uuid/version6.go
generated
vendored
Normal file
56
vendor/github.com/google/uuid/version6.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
// Copyright 2023 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
|
||||
// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
|
||||
// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
|
||||
//
|
||||
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
|
||||
//
|
||||
// NewV6 returns a Version 6 UUID based on the current NodeID and clock
|
||||
// sequence, and the current time. If the NodeID has not been set by SetNodeID
|
||||
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
|
||||
// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by
|
||||
// SetClockSequence then it will be set automatically. If GetTime fails to
|
||||
// return the current NewV6 returns Nil and an error.
|
||||
func NewV6() (UUID, error) {
|
||||
var uuid UUID
|
||||
now, seq, err := GetTime()
|
||||
if err != nil {
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
/*
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| time_high |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| time_mid | time_low_and_version |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|clk_seq_hi_res | clk_seq_low | node (0-1) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| node (2-5) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
*/
|
||||
|
||||
binary.BigEndian.PutUint64(uuid[0:], uint64(now))
|
||||
binary.BigEndian.PutUint16(uuid[8:], seq)
|
||||
|
||||
uuid[6] = 0x60 | (uuid[6] & 0x0F)
|
||||
uuid[8] = 0x80 | (uuid[8] & 0x3F)
|
||||
|
||||
nodeMu.Lock()
|
||||
if nodeID == zeroID {
|
||||
setNodeInterface("")
|
||||
}
|
||||
copy(uuid[10:], nodeID[:])
|
||||
nodeMu.Unlock()
|
||||
|
||||
return uuid, nil
|
||||
}
|
||||
104
vendor/github.com/google/uuid/version7.go
generated
vendored
Normal file
104
vendor/github.com/google/uuid/version7.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
// Copyright 2023 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well known Unix Epoch timestamp source: the number of
// milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
// It also has improved entropy characteristics over versions 1 or 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
// Uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error.
func NewV7() (UUID, error) {
	// Start from a fully random (v4) UUID, then overwrite the timestamp,
	// version, and sequence fields in place.
	uuid, err := NewRandom()
	if err != nil {
		return uuid, err
	}
	makeV7(uuid[:])
	return uuid, nil
}
|
||||
|
||||
// NewV7FromReader returns a Version 7 UUID based on the current time (Unix
// Epoch). It uses NewRandomFromReader to fill the random bits from r.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
	uuid, err := NewRandomFromReader(r)
	if err != nil {
		return uuid, err
	}

	makeV7(uuid[:])
	return uuid, nil
}
|
||||
|
||||
// makeV7 fills in the 48-bit Unix millisecond timestamp (uuid[0] - uuid[5])
// and sets version 0b0111 together with a 12-bit sub-millisecond sequence
// (uuid[6] - uuid[7]). uuid[8] already has the right variant bits (0b10) from
// the random source. See NewV7 and NewV7FromReader.
func makeV7(uuid []byte) {
	/*
	    0                   1                   2                   3
	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |                           unix_ts_ms                          |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |          unix_ts_ms           |  ver  |  rand_a (12 bit seq)  |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |var|                        rand_b                             |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |                            rand_b                             |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	*/
	_ = uuid[15] // bounds check hint: one check covers all writes below

	t, s := getV7Time()

	// 48-bit big-endian millisecond timestamp.
	uuid[0] = byte(t >> 40)
	uuid[1] = byte(t >> 32)
	uuid[2] = byte(t >> 24)
	uuid[3] = byte(t >> 16)
	uuid[4] = byte(t >> 8)
	uuid[5] = byte(t)

	// Version 7 nibble plus the high 4 bits of the 12-bit sequence, then
	// the low 8 sequence bits.
	uuid[6] = 0x70 | (0x0F & byte(s>>8))
	uuid[7] = byte(s)
}
|
||||
|
||||
// lastV7time is the last time value returned by getV7Time, stored as:
//
//	52 bits of time in milliseconds since epoch
//	12 bits of (fractional nanoseconds) >> 8
//
// It is guarded by timeMu (see getV7Time).
var lastV7time int64

// nanoPerMilli is the number of nanoseconds in one millisecond.
const nanoPerMilli = 1000000
|
||||
|
||||
// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guaranteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time, which
// keeps V7 UUIDs generated by this process strictly ordered.
func getV7Time() (milli, seq int64) {
	timeMu.Lock()
	defer timeMu.Unlock()

	nano := timeNow().UnixNano()
	milli = nano / nanoPerMilli
	// Sequence number is between 0 and 3906 (nanoPerMilli>>8): the
	// sub-millisecond nanoseconds shifted down to fit in 12 bits.
	seq = (nano - milli*nanoPerMilli) >> 8
	now := milli<<12 + seq
	if now <= lastV7time {
		// The clock did not advance (or went backwards); bump by one
		// tick so the combined value stays strictly increasing.
		now = lastV7time + 1
		milli = now >> 12
		seq = now & 0xfff
	}
	lastV7time = now
	return milli, seq
}
|
||||
201
vendor/github.com/inconshreveable/mousetrap/LICENSE
generated
vendored
Normal file
201
vendor/github.com/inconshreveable/mousetrap/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2022 Alan Shreve (@inconshreveable)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
23
vendor/github.com/inconshreveable/mousetrap/README.md
generated
vendored
Normal file
23
vendor/github.com/inconshreveable/mousetrap/README.md
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# mousetrap
|
||||
|
||||
mousetrap is a tiny library that answers a single question.
|
||||
|
||||
On a Windows machine, was the process invoked by someone double clicking on
|
||||
the executable file while browsing in explorer?
|
||||
|
||||
### Motivation
|
||||
|
||||
Windows developers unfamiliar with command line tools will often "double-click"
|
||||
the executable for a tool. Because most CLI tools print the help and then exit
|
||||
when invoked without arguments, this is often very frustrating for those users.
|
||||
|
||||
mousetrap provides a way to detect these invocations so that you can provide
|
||||
more helpful behavior and instructions on how to run the CLI tool. To see what
|
||||
this looks like, both from an organizational and a technical perspective, see
|
||||
https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
|
||||
|
||||
### The interface
|
||||
|
||||
The library exposes a single interface:
|
||||
|
||||
func StartedByExplorer() (bool)
|
||||
16
vendor/github.com/inconshreveable/mousetrap/trap_others.go
generated
vendored
Normal file
16
vendor/github.com/inconshreveable/mousetrap/trap_others.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package mousetrap
|
||||
|
||||
// StartedByExplorer reports whether the program was invoked by the user
// double-clicking the executable in explorer.exe.
//
// It is conservative and returns false if any of the internal calls fail.
// It does not guarantee that the program was run from a terminal; it can only
// tell you whether it was launched from explorer.exe.
//
// On non-Windows platforms (this build), it always returns false.
func StartedByExplorer() bool {
	return false
}
|
||||
42
vendor/github.com/inconshreveable/mousetrap/trap_windows.go
generated
vendored
Normal file
42
vendor/github.com/inconshreveable/mousetrap/trap_windows.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package mousetrap
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// getProcessEntry walks a snapshot of all system processes and returns the
// ProcessEntry32 for the given pid. It returns an error if the snapshot
// cannot be taken or the pid is not found (Process32Next reports an error
// once the list is exhausted, which terminates the loop).
func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
	snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
	if err != nil {
		return nil, err
	}
	defer syscall.CloseHandle(snapshot)
	var procEntry syscall.ProcessEntry32
	// Size must be initialized before the first Process32First call.
	procEntry.Size = uint32(unsafe.Sizeof(procEntry))
	if err = syscall.Process32First(snapshot, &procEntry); err != nil {
		return nil, err
	}
	for {
		if procEntry.ProcessID == uint32(pid) {
			return &procEntry, nil
		}
		err = syscall.Process32Next(snapshot, &procEntry)
		if err != nil {
			return nil, err
		}
	}
}
|
||||
|
||||
// StartedByExplorer returns true if the program was invoked by the user double-clicking
|
||||
// on the executable from explorer.exe
|
||||
//
|
||||
// It is conservative and returns false if any of the internal calls fail.
|
||||
// It does not guarantee that the program was run from a terminal. It only can tell you
|
||||
// whether it was launched from explorer.exe
|
||||
func StartedByExplorer() bool {
|
||||
pe, err := getProcessEntry(syscall.Getppid())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
|
||||
}
|
||||
9
vendor/github.com/mattn/go-isatty/LICENSE
generated
vendored
Normal file
9
vendor/github.com/mattn/go-isatty/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
|
||||
|
||||
MIT License (Expat)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
50
vendor/github.com/mattn/go-isatty/README.md
generated
vendored
Normal file
50
vendor/github.com/mattn/go-isatty/README.md
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
# go-isatty
|
||||
|
||||
[](http://godoc.org/github.com/mattn/go-isatty)
|
||||
[](https://codecov.io/gh/mattn/go-isatty)
|
||||
[](https://coveralls.io/github/mattn/go-isatty?branch=master)
|
||||
[](https://goreportcard.com/report/mattn/go-isatty)
|
||||
|
||||
isatty for golang
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mattn/go-isatty"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
fmt.Println("Is Terminal")
|
||||
} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
|
||||
fmt.Println("Is Cygwin/MSYS2 Terminal")
|
||||
} else {
|
||||
fmt.Println("Is Not Terminal")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go get github.com/mattn/go-isatty
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
## Author
|
||||
|
||||
Yasuhiro Matsumoto (a.k.a mattn)
|
||||
|
||||
## Thanks
|
||||
|
||||
* k-takata: base idea for IsCygwinTerminal
|
||||
|
||||
https://github.com/k-takata/go-iscygpty
|
||||
2
vendor/github.com/mattn/go-isatty/doc.go
generated
vendored
Normal file
2
vendor/github.com/mattn/go-isatty/doc.go
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
// Package isatty reports whether a file descriptor refers to a terminal,
// including Cygwin/MSYS2 pseudo-terminals on Windows (see IsCygwinTerminal).
package isatty
|
||||
12
vendor/github.com/mattn/go-isatty/go.test.sh
generated
vendored
Normal file
12
vendor/github.com/mattn/go-isatty/go.test.sh
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
#!/usr/bin/env bash

# Run `go test` with the race detector for every package (vendor excluded)
# and concatenate the per-package coverage profiles into coverage.txt for a
# coverage-service upload.
set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
    go test -race -coverprofile=profile.out -covermode=atomic "$d"
    # Merge and discard the per-package profile if the test wrote one.
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
|
||||
20
vendor/github.com/mattn/go-isatty/isatty_bsd.go
generated
vendored
Normal file
20
vendor/github.com/mattn/go-isatty/isatty_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo
|
||||
// +build darwin freebsd openbsd netbsd dragonfly hurd
|
||||
// +build !appengine
|
||||
// +build !tinygo
|
||||
|
||||
package isatty
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// IsTerminal returns true if the file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	// TIOCGETA succeeds only on tty devices on BSD-like systems.
	_, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
	return err == nil
}
|
||||
|
||||
// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
// terminal. It is always false on this platform (BSD-like systems).
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
|
||||
17
vendor/github.com/mattn/go-isatty/isatty_others.go
generated
vendored
Normal file
17
vendor/github.com/mattn/go-isatty/isatty_others.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
//go:build (appengine || js || nacl || tinygo || wasm) && !windows
|
||||
// +build appengine js nacl tinygo wasm
|
||||
// +build !windows
|
||||
|
||||
package isatty
|
||||
|
||||
// IsTerminal returns true if the file descriptor is a terminal. It is always
// false here: js, appengine, and similar sandboxed targets have no terminal.
func IsTerminal(fd uintptr) bool {
	return false
}
|
||||
|
||||
// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
// terminal. It is always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
|
||||
23
vendor/github.com/mattn/go-isatty/isatty_plan9.go
generated
vendored
Normal file
23
vendor/github.com/mattn/go-isatty/isatty_plan9.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
//go:build plan9
|
||||
// +build plan9
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	// On Plan 9, a terminal is identified by its console device path.
	path, err := syscall.Fd2path(int(fd))
	if err != nil {
		return false
	}
	// /mnt/term/dev/cons covers consoles imported via cpu(1).
	return path == "/dev/cons" || path == "/mnt/term/dev/cons"
}
|
||||
|
||||
// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
// terminal. It is always false on Plan 9.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
|
||||
21
vendor/github.com/mattn/go-isatty/isatty_solaris.go
generated
vendored
Normal file
21
vendor/github.com/mattn/go-isatty/isatty_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
//go:build solaris && !appengine
|
||||
// +build solaris,!appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c
func IsTerminal(fd uintptr) bool {
	// Solaris/illumos uses TCGETA with a termio struct, mirroring libc's
	// own isatty implementation (see link above).
	_, err := unix.IoctlGetTermio(int(fd), unix.TCGETA)
	return err == nil
}
|
||||
|
||||
// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
// terminal. It is always false on Solaris/illumos.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
|
||||
20
vendor/github.com/mattn/go-isatty/isatty_tcgets.go
generated
vendored
Normal file
20
vendor/github.com/mattn/go-isatty/isatty_tcgets.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
//go:build (linux || aix || zos) && !appengine && !tinygo
|
||||
// +build linux aix zos
|
||||
// +build !appengine
|
||||
// +build !tinygo
|
||||
|
||||
package isatty
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// IsTerminal returns true if the file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	// TCGETS succeeds only on tty devices (Linux, AIX, z/OS).
	_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
	return err == nil
}
|
||||
|
||||
// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
// terminal. It is always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
|
||||
125
vendor/github.com/mattn/go-isatty/isatty_windows.go
generated
vendored
Normal file
125
vendor/github.com/mattn/go-isatty/isatty_windows.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
//go:build windows && !appengine
|
||||
// +build windows,!appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
	// objectNameInfo is passed to NtQueryObject to request the handle's
	// name (ObjectNameInformation — TODO confirm against NtQueryObject docs).
	objectNameInfo uintptr = 1
	// fileNameInfo is the GetFileInformationByHandleEx info class that
	// returns the file name (FileNameInfo).
	fileNameInfo = 2
	// fileTypePipe is the GetFileType result for pipe handles
	// (FILE_TYPE_PIPE).
	fileTypePipe = 3
)

// Lazily-resolved Win32/NT entry points used by IsTerminal and
// IsCygwinTerminal.
var (
	kernel32                         = syscall.NewLazyDLL("kernel32.dll")
	ntdll                            = syscall.NewLazyDLL("ntdll.dll")
	procGetConsoleMode               = kernel32.NewProc("GetConsoleMode")
	procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
	procGetFileType                  = kernel32.NewProc("GetFileType")
	procNtQueryObject                = ntdll.NewProc("NtQueryObject")
)
|
||||
|
||||
func init() {
	// Check if GetFileInformationByHandleEx is available; it is absent on
	// old Windows versions. A nil value makes IsCygwinTerminal fall back
	// to getFileNameByHandle.
	if procGetFileInformationByHandleEx.Find() != nil {
		procGetFileInformationByHandleEx = nil
	}
}
||||
|
||||
// IsTerminal returns true if the file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	// GetConsoleMode succeeds (nonzero return, no error) only for
	// console handles.
	var st uint32
	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
	return r != 0 && e == 0
}
|
||||
|
||||
// isCygwinPipeName reports whether a pipe name is the one used for a
// Cygwin/MSYS2 pty. Such a pty has a name like:
//
//	\{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
func isCygwinPipeName(name string) bool {
	token := strings.Split(name, "-")
	if len(token) < 5 {
		return false
	}

	// First component: the cygwin/msys prefix, with or without the
	// \Device\NamedPipe\ namespace.
	switch token[0] {
	case `\msys`, `\cygwin`, `\Device\NamedPipe\msys`, `\Device\NamedPipe\cygwin`:
		// recognized prefix
	default:
		return false
	}

	// Second component is the session id and must be non-empty; third
	// names the pty device.
	if token[1] == "" || !strings.HasPrefix(token[2], "pty") {
		return false
	}

	// Direction and endpoint markers.
	if token[3] != "from" && token[3] != "to" {
		return false
	}
	return token[4] == "master"
}
|
||||
|
||||
// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get
// the full file name from a file handle. GetFileInformationByHandleEx is not
// available before Windows Vista, so this serves as a fallback for older
// systems; it also works on Windows Vista through 10.
// see https://stackoverflow.com/a/18792477 for details
func getFileNameByHandle(fd uintptr) (string, error) {
	if procNtQueryObject == nil {
		return "", errors.New("ntdll.dll: NtQueryObject not supported")
	}

	// buf receives a name-information structure: the name length in bytes
	// is read from buf[0], and the UTF-16 name itself starts at buf[4].
	var buf [4 + syscall.MAX_PATH]uint16
	var result int
	r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
		fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
	if r != 0 {
		// Non-zero NTSTATUS indicates failure.
		return "", e
	}
	return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
}
|
||||
|
||||
// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
// terminal.
func IsCygwinTerminal(fd uintptr) bool {
	if procGetFileInformationByHandleEx == nil {
		// Pre-Vista fallback (set up in init): resolve the handle name
		// via NtQueryObject instead.
		name, err := getFileNameByHandle(fd)
		if err != nil {
			return false
		}
		return isCygwinPipeName(name)
	}

	// Cygwin/msys's pty is a pipe.
	ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
	if ft != fileTypePipe || e != 0 {
		return false
	}

	// buf receives the name length in bytes (first two uint16s, read as a
	// uint32 below) followed by the UTF-16 pipe name starting at buf[2].
	var buf [2 + syscall.MAX_PATH]uint16
	r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
		4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
		uintptr(len(buf)*2), 0, 0)
	if r == 0 || e != 0 {
		return false
	}

	l := *(*uint32)(unsafe.Pointer(&buf))
	return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
}
|
||||
31
vendor/github.com/munnerz/goautoneg/LICENSE
generated
vendored
Normal file
31
vendor/github.com/munnerz/goautoneg/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
13
vendor/github.com/munnerz/goautoneg/Makefile
generated
vendored
Normal file
13
vendor/github.com/munnerz/goautoneg/Makefile
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
include $(GOROOT)/src/Make.inc
|
||||
|
||||
TARG=bitbucket.org/ww/goautoneg
|
||||
GOFILES=autoneg.go
|
||||
|
||||
include $(GOROOT)/src/Make.pkg
|
||||
|
||||
format:
|
||||
gofmt -w *.go
|
||||
|
||||
docs:
|
||||
gomake clean
|
||||
godoc ${TARG} > README.txt
|
||||
67
vendor/github.com/munnerz/goautoneg/README.txt
generated
vendored
Normal file
67
vendor/github.com/munnerz/goautoneg/README.txt
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
PACKAGE
|
||||
|
||||
package goautoneg
|
||||
import "bitbucket.org/ww/goautoneg"
|
||||
|
||||
HTTP Content-Type Autonegotiation.
|
||||
|
||||
The functions in this package implement the behaviour specified in
|
||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
FUNCTIONS
|
||||
|
||||
func Negotiate(header string, alternatives []string) (content_type string)
|
||||
Negotiate the most appropriate content_type given the accept header
|
||||
and a list of alternatives.
|
||||
|
||||
func ParseAccept(header string) (accept []Accept)
|
||||
Parse an Accept Header string returning a sorted list
|
||||
of clauses
|
||||
|
||||
|
||||
TYPES
|
||||
|
||||
type Accept struct {
|
||||
Type, SubType string
|
||||
Q float32
|
||||
Params map[string]string
|
||||
}
|
||||
Structure to represent a clause in an HTTP Accept Header
|
||||
|
||||
|
||||
SUBDIRECTORIES
|
||||
|
||||
.hg
|
||||
189
vendor/github.com/munnerz/goautoneg/autoneg.go
generated
vendored
Normal file
189
vendor/github.com/munnerz/goautoneg/autoneg.go
generated
vendored
Normal file
@@ -0,0 +1,189 @@
|
||||
/*
|
||||
HTTP Content-Type Autonegotiation.
|
||||
|
||||
The functions in this package implement the behaviour specified in
|
||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package goautoneg
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Accept represents a single clause in an HTTP Accept header.
type Accept struct {
	Type, SubType string
	Q             float64
	Params        map[string]string
}

// acceptSlice implements sort.Interface so clauses can be ordered by
// preference: higher q first, concrete types/subtypes before wildcards.
type acceptSlice []Accept

// Len reports the number of clauses.
func (s acceptSlice) Len() int {
	return len(s)
}

// Less orders clause i before clause j when it has a higher quality
// value, or, at equal footing, when it is more specific (non-wildcard)
// where clause j uses a wildcard.
func (s acceptSlice) Less(i, j int) bool {
	a, b := s[i], s[j]
	switch {
	case a.Q > b.Q:
		return true
	case a.Type != "*" && b.Type == "*":
		return true
	case a.SubType != "*" && b.SubType == "*":
		return true
	}
	return false
}

// Swap exchanges clauses i and j.
func (s acceptSlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// stringTrimSpaceCutset reports whether r is a plain ASCII space; used
// with strings.TrimFunc to trim spaces (and only spaces) around tokens.
func stringTrimSpaceCutset(r rune) bool {
	return r == ' '
}
|
||||
|
||||
// nextSplitElement returns the text of s before the first occurrence of
// sep, plus everything after it. If sep does not occur, it returns s
// itself and an empty remainder. Note the remainder always skips exactly
// one byte after the match (callers only pass single-byte separators).
func nextSplitElement(s, sep string) (item string, remaining string) {
	i := strings.Index(s, sep)
	if i < 0 {
		return s, ""
	}
	return s[:i], s[i+1:]
}
|
||||
|
||||
// ParseAccept parses an Accept header string, returning the clauses
// sorted by preference (highest q first, specific before wildcard).
//
// Malformed clauses (a non-wildcard type with no subtype, or more than
// one '/') are silently skipped. A missing q parameter defaults to 1.0;
// a malformed q value parses to 0.
func ParseAccept(header string) acceptSlice {
	// First pass: count the comma-separated parts so the result slice
	// can be allocated exactly once.
	partsCount := 0
	remaining := header
	for len(remaining) > 0 {
		partsCount++
		_, remaining = nextSplitElement(remaining, ",")
	}
	accept := make(acceptSlice, 0, partsCount)

	// Second pass: parse each comma-separated part into a clause.
	remaining = header
	var part string
	for len(remaining) > 0 {
		part, remaining = nextSplitElement(remaining, ",")
		part = strings.TrimFunc(part, stringTrimSpaceCutset)

		a := Accept{
			Q: 1.0, // default quality when no q parameter is present
		}

		// Split "type/subtype" from the ";param=value..." tail.
		sp, remainingPart := nextSplitElement(part, ";")

		sp0, spRemaining := nextSplitElement(sp, "/")
		a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset)

		switch {
		case len(spRemaining) == 0:
			// No '/': only a bare "*" is accepted, as shorthand for "*/*".
			if a.Type == "*" {
				a.SubType = "*"
			} else {
				continue
			}
		default:
			var sp1 string
			sp1, spRemaining = nextSplitElement(spRemaining, "/")
			if len(spRemaining) > 0 {
				// More than one '/': malformed, skip this clause.
				continue
			}
			a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset)
		}

		if len(remainingPart) == 0 {
			accept = append(accept, a)
			continue
		}

		// Parse ";key=value" parameters; "q" sets the quality, anything
		// else is stored in Params (value space-trimmed).
		a.Params = make(map[string]string)
		for len(remainingPart) > 0 {
			sp, remainingPart = nextSplitElement(remainingPart, ";")
			sp0, spRemaining = nextSplitElement(sp, "=")
			if len(spRemaining) == 0 {
				// No '=': not a key=value parameter, skip it.
				continue
			}
			var sp1 string
			sp1, spRemaining = nextSplitElement(spRemaining, "=")
			if len(spRemaining) != 0 {
				// More than one '=': malformed parameter, skip it.
				continue
			}
			token := strings.TrimFunc(sp0, stringTrimSpaceCutset)
			if token == "q" {
				// Error is ignored; on failure a.Q becomes 0.
				a.Q, _ = strconv.ParseFloat(sp1, 32)
			} else {
				a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset)
			}
		}

		accept = append(accept, a)
	}

	sort.Sort(accept)
	return accept
}
|
||||
|
||||
// Negotiate returns the most appropriate content type from alternatives
// given the request's Accept header. Clauses are tried in preference
// order; within each clause an exact type/subtype match wins, then a
// type match with wildcard subtype, then a full wildcard. Returns ""
// when nothing matches.
//
// NOTE(review): each alternative appears to be assumed to contain a '/'
// (e.g. "text/html"); an alternative without one would make ctsp[1]
// panic — confirm callers only pass full media types.
func Negotiate(header string, alternatives []string) (content_type string) {
	// Pre-split each alternative into [type, subtype].
	asp := make([][]string, 0, len(alternatives))
	for _, ctype := range alternatives {
		asp = append(asp, strings.SplitN(ctype, "/", 2))
	}
	for _, clause := range ParseAccept(header) {
		for i, ctsp := range asp {
			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
				content_type = alternatives[i]
				return
			}
			if clause.Type == ctsp[0] && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
			if clause.Type == "*" && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
		}
	}
	return
}
|
||||
15
vendor/github.com/ncruces/go-strftime/.gitignore
generated
vendored
Normal file
15
vendor/github.com/ncruces/go-strftime/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
21
vendor/github.com/ncruces/go-strftime/LICENSE
generated
vendored
Normal file
21
vendor/github.com/ncruces/go-strftime/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2022 Nuno Cruces
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
5
vendor/github.com/ncruces/go-strftime/README.md
generated
vendored
Normal file
5
vendor/github.com/ncruces/go-strftime/README.md
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
# `strftime`/`strptime` compatible time formatting and parsing for Go
|
||||
|
||||
[](https://pkg.go.dev/github.com/ncruces/go-strftime)
|
||||
[](https://goreportcard.com/report/github.com/ncruces/go-strftime)
|
||||
[](https://raw.githack.com/wiki/ncruces/go-strftime/coverage.html)
|
||||
107
vendor/github.com/ncruces/go-strftime/parser.go
generated
vendored
Normal file
107
vendor/github.com/ncruces/go-strftime/parser.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
package strftime
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// parser drives a strftime format string through a pair of callbacks:
// format is invoked for each %-directive (with its optional flag byte),
// literal for every ordinary byte.
type parser struct {
	format  func(spec, flag byte) error
	literal func(byte) error
}

// parse scans fmt, dispatching directives to p.format and other bytes to
// p.literal. A small state machine handles the '-' and ':' flags and the
// 'E'/'O' modifiers (the modifier itself is discarded). An unterminated
// directive at end of input is emitted as literal text.
func (p *parser) parse(fmt string) error {
	const (
		initial  = iota // outside any directive
		percent         // just saw '%'
		flagged         // saw "%-" or "%:"
		modified        // saw "%E"/"%O", possibly after a flag
	)

	var flag, modifier byte
	var err error
	state := initial
	start := 0 // index of the '%' that opened the current directive
	for i, b := range []byte(fmt) {
		switch state {
		default:
			if b == '%' {
				state = percent
				start = i
				continue
			}
			err = p.literal(b)

		case percent:
			if b == '-' || b == ':' {
				state = flagged
				flag = b
				continue
			}
			if b == 'E' || b == 'O' {
				state = modified
				modifier = b
				flag = 0
				continue
			}
			err = p.format(b, 0)
			state = initial

		case flagged:
			if b == 'E' || b == 'O' {
				state = modified
				modifier = b
				continue
			}
			err = p.format(b, flag)
			state = initial

		case modified:
			if okModifier(modifier, b) {
				// Valid modifier/specifier pair; the modifier is ignored.
				err = p.format(b, flag)
			} else {
				// Invalid combination: emit the whole run as literals.
				err = p.literals(fmt[start : i+1])
			}
			state = initial
		}

		if err != nil {
			// Attach the offending directive text to format errors.
			if err, ok := err.(formatError); ok {
				err.setDirective(fmt, start, i)
				return err
			}
			return err
		}
	}

	// Input ended mid-directive: flush the partial directive as literals.
	if state != initial {
		return p.literals(fmt[start:])
	}
	return nil
}

// literals feeds each byte of literal to p.literal, stopping at the
// first error.
func (p *parser) literals(literal string) error {
	for _, b := range []byte(literal) {
		if err := p.literal(b); err != nil {
			return err
		}
	}
	return nil
}

// literalErr reports a literal byte or sequence that cannot be
// represented in the target layout.
type literalErr string

func (e literalErr) Error() string {
	return "strftime: unsupported literal: " + string(e)
}

// formatError reports an unsupported %-directive; directive is filled in
// by setDirective once the offending span is known.
type formatError struct {
	message   string
	directive string
}

func (e formatError) Error() string {
	return "strftime: unsupported directive: " + e.directive + " " + e.message
}

// setDirective records str[i:j] plus the full rune starting at j as the
// directive text (the final byte may begin a multi-byte rune).
func (e *formatError) setDirective(str string, i, j int) {
	_, n := utf8.DecodeRuneInString(str[j:])
	e.directive = str[i : j+n]
}
|
||||
96
vendor/github.com/ncruces/go-strftime/pkg.go
generated
vendored
Normal file
96
vendor/github.com/ncruces/go-strftime/pkg.go
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
/*
|
||||
Package strftime provides strftime/strptime compatible time formatting and parsing.
|
||||
|
||||
The following formatting specifiers are available:
|
||||
|
||||
Date (Year, Month, Day):
|
||||
%Y - Year with century (can be negative, 4 digits at least)
|
||||
-0001, 0000, 1995, 2009, 14292, etc.
|
||||
%C - year / 100 (round down, 20 in 2009)
|
||||
%y - year % 100 (00..99)
|
||||
|
||||
%m - Month of the year, zero-padded (01..12)
|
||||
%-m no-padded (1..12)
|
||||
%B - Full month name (January)
|
||||
%b - Abbreviated month name (Jan)
|
||||
%h - Equivalent to %b
|
||||
|
||||
%d - Day of the month, zero-padded (01..31)
|
||||
%-d no-padded (1..31)
|
||||
%e - Day of the month, blank-padded ( 1..31)
|
||||
|
||||
%j - Day of the year (001..366)
|
||||
%-j no-padded (1..366)
|
||||
|
||||
Time (Hour, Minute, Second, Subsecond):
|
||||
%H - Hour of the day, 24-hour clock, zero-padded (00..23)
|
||||
%-H no-padded (0..23)
|
||||
%k - Hour of the day, 24-hour clock, blank-padded ( 0..23)
|
||||
%I - Hour of the day, 12-hour clock, zero-padded (01..12)
|
||||
%-I no-padded (1..12)
|
||||
%l - Hour of the day, 12-hour clock, blank-padded ( 1..12)
|
||||
%P - Meridian indicator, lowercase (am or pm)
|
||||
%p - Meridian indicator, uppercase (AM or PM)
|
||||
|
||||
%M - Minute of the hour (00..59)
|
||||
%-M no-padded (0..59)
|
||||
|
||||
%S - Second of the minute (00..60)
|
||||
%-S no-padded (0..60)
|
||||
|
||||
%L - Millisecond of the second (000..999)
|
||||
%f - Microsecond of the second (000000..999999)
|
||||
%N - Nanosecond of the second (000000000..999999999)
|
||||
|
||||
Time zone:
|
||||
%z - Time zone as hour and minute offset from UTC (e.g. +0900)
|
||||
%:z - hour and minute offset from UTC with a colon (e.g. +09:00)
|
||||
%Z - Time zone abbreviation (e.g. MST)
|
||||
|
||||
Weekday:
|
||||
%A - Full weekday name (Sunday)
|
||||
%a - Abbreviated weekday name (Sun)
|
||||
%u - Day of the week (Monday is 1, 1..7)
|
||||
%w - Day of the week (Sunday is 0, 0..6)
|
||||
|
||||
ISO 8601 week-based year and week number:
|
||||
Week 1 of YYYY starts with a Monday and includes YYYY-01-04.
|
||||
The days in the year before the first week are in the last week of
|
||||
the previous year.
|
||||
%G - Week-based year
|
||||
%g - Last 2 digits of the week-based year (00..99)
|
||||
%V - Week number of the week-based year (01..53)
|
||||
%-V no-padded (1..53)
|
||||
|
||||
Week number:
|
||||
Week 1 of YYYY starts with a Sunday or Monday (according to %U or %W).
|
||||
The days in the year before the first week are in week 0.
|
||||
%U - Week number of the year. The week starts with Sunday. (00..53)
|
||||
%-U no-padded (0..53)
|
||||
%W - Week number of the year. The week starts with Monday. (00..53)
|
||||
%-W no-padded (0..53)
|
||||
|
||||
Seconds since the Unix Epoch:
|
||||
%s - Number of seconds since 1970-01-01 00:00:00 UTC.
|
||||
%Q - Number of milliseconds since 1970-01-01 00:00:00 UTC.
|
||||
|
||||
Literal string:
|
||||
%n - Newline character (\n)
|
||||
%t - Tab character (\t)
|
||||
%% - Literal % character
|
||||
|
||||
Combination:
|
||||
%c - date and time (%a %b %e %T %Y)
|
||||
%D - Date (%m/%d/%y)
|
||||
%F - ISO 8601 date format (%Y-%m-%d)
|
||||
%v - VMS date (%e-%b-%Y)
|
||||
%x - Same as %D
|
||||
%X - Same as %T
|
||||
%r - 12-hour time (%I:%M:%S %p)
|
||||
%R - 24-hour time (%H:%M)
|
||||
%T - 24-hour time (%H:%M:%S)
|
||||
%+ - date(1) (%a %b %e %H:%M:%S %Z %Y)
|
||||
|
||||
The modifiers “E” and “O” are ignored.
|
||||
*/
|
||||
package strftime
|
||||
241
vendor/github.com/ncruces/go-strftime/specifiers.go
generated
vendored
Normal file
241
vendor/github.com/ncruces/go-strftime/specifiers.go
generated
vendored
Normal file
@@ -0,0 +1,241 @@
|
||||
package strftime
|
||||
|
||||
import "strings"
|
||||
|
||||
// goLayout maps a strftime specifier (with optional '-' or ':' flag) to
// the equivalent Go time layout fragment, or "" when there is no Go
// equivalent. When parsing is true, unpadded numeric forms are returned
// where possible, since time.Parse accepts both padded and unpadded
// values. Specifier reference: https://strftime.org/
func goLayout(spec, flag byte, parsing bool) string {
	switch spec {
	default:
		return ""

	case 'B':
		return "January"
	case 'b', 'h':
		return "Jan"
	case 'm':
		if flag == '-' || parsing {
			return "1"
		}
		return "01"
	case 'A':
		return "Monday"
	case 'a':
		return "Mon"
	case 'e':
		return "_2"
	case 'd':
		if flag == '-' || parsing {
			return "2"
		}
		return "02"
	case 'j':
		if flag == '-' {
			if parsing {
				return "__2"
			}
			// %-j has no unpadded Go layout when formatting.
			return ""
		}
		return "002"
	case 'I':
		if flag == '-' || parsing {
			return "3"
		}
		return "03"
	case 'H':
		if flag == '-' && !parsing {
			// %-H has no unpadded Go layout when formatting.
			return ""
		}
		return "15"
	case 'M':
		if flag == '-' || parsing {
			return "4"
		}
		return "04"
	case 'S':
		if flag == '-' || parsing {
			return "5"
		}
		return "05"
	case 'y':
		return "06"
	case 'Y':
		return "2006"
	case 'p':
		return "PM"
	case 'P':
		return "pm"
	case 'Z':
		return "MST"
	case 'z':
		if flag == ':' {
			if parsing {
				return "Z07:00"
			}
			return "-07:00"
		}
		if parsing {
			return "Z0700"
		}
		return "-0700"

	// Combination directives.
	case '+':
		if parsing {
			return "Mon Jan _2 15:4:5 MST 2006"
		}
		return "Mon Jan _2 15:04:05 MST 2006"
	case 'c':
		if parsing {
			return "Mon Jan _2 15:4:5 2006"
		}
		return "Mon Jan _2 15:04:05 2006"
	case 'v':
		return "_2-Jan-2006"
	case 'F':
		if parsing {
			return "2006-1-2"
		}
		return "2006-01-02"
	case 'D', 'x':
		if parsing {
			return "1/2/06"
		}
		return "01/02/06"
	case 'r':
		if parsing {
			return "3:4:5 PM"
		}
		return "03:04:05 PM"
	case 'T', 'X':
		if parsing {
			return "15:4:5"
		}
		return "15:04:05"
	case 'R':
		if parsing {
			return "15:4"
		}
		return "15:04"

	// Literal escapes.
	case '%':
		return "%"
	case 't':
		return "\t"
	case 'n':
		return "\n"
	}
}
|
||||
|
||||
// uts35Pattern maps a strftime specifier (with optional flag) to the
// equivalent Unicode Technical Standard #35 date format pattern, or ""
// when there is no UTS#35 equivalent.
// Pattern reference: https://nsdateformatter.com/
func uts35Pattern(spec, flag byte) string {
	switch spec {
	default:
		return ""

	case 'B':
		return "MMMM"
	case 'b', 'h':
		return "MMM"
	case 'm':
		if flag == '-' {
			return "M"
		}
		return "MM"
	case 'A':
		return "EEEE"
	case 'a':
		return "E"
	case 'd':
		if flag == '-' {
			return "d"
		}
		return "dd"
	case 'j':
		if flag == '-' {
			return "D"
		}
		return "DDD"
	case 'I':
		if flag == '-' {
			return "h"
		}
		return "hh"
	case 'H':
		if flag == '-' {
			return "H"
		}
		return "HH"
	case 'M':
		if flag == '-' {
			return "m"
		}
		return "mm"
	case 'S':
		if flag == '-' {
			return "s"
		}
		return "ss"
	case 'y':
		return "yy"
	case 'Y':
		return "yyyy"
	case 'g':
		// Week-based year, per UTS#35 'Y' pattern letters.
		return "YY"
	case 'G':
		return "YYYY"
	case 'V':
		if flag == '-' {
			return "w"
		}
		return "ww"
	case 'p':
		return "a"
	case 'Z':
		return "zzz"
	case 'z':
		if flag == ':' {
			return "xxx"
		}
		return "xx"
	case 'L':
		return "SSS"
	case 'f':
		return "SSSSSS"
	case 'N':
		return "SSSSSSSSS"

	// Combination directives.
	case '+':
		return "E MMM d HH:mm:ss zzz yyyy"
	case 'c':
		return "E MMM d HH:mm:ss yyyy"
	case 'v':
		return "d-MMM-yyyy"
	case 'F':
		return "yyyy-MM-dd"
	case 'D', 'x':
		return "MM/dd/yy"
	case 'r':
		return "hh:mm:ss a"
	case 'T', 'X':
		return "HH:mm:ss"
	case 'R':
		return "HH:mm"

	// Literal escapes.
	case '%':
		return "%"
	case 't':
		return "\t"
	case 'n':
		return "\n"
	}
}
|
||||
|
||||
// okModifier reports whether the 'E' or 'O' modifier may legally precede
// the specifier spec, per strftime(3) (http://man.he.net/man3/strftime).
func okModifier(mod, spec byte) bool {
	switch mod {
	case 'E':
		return strings.IndexByte("cCxXyY", spec) >= 0
	case 'O':
		return strings.IndexByte("deHImMSuUVwWy", spec) >= 0
	}
	return false
}
|
||||
346
vendor/github.com/ncruces/go-strftime/strftime.go
generated
vendored
Normal file
346
vendor/github.com/ncruces/go-strftime/strftime.go
generated
vendored
Normal file
@@ -0,0 +1,346 @@
|
||||
package strftime
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Format returns a textual representation of the time value
|
||||
// formatted according to the strftime format specification.
|
||||
func Format(fmt string, t time.Time) string {
|
||||
buf := buffer(fmt)
|
||||
return string(AppendFormat(buf, fmt, t))
|
||||
}
|
||||
|
||||
// AppendFormat is like Format, but appends the textual representation of
// t to dst and returns the extended buffer.
func AppendFormat(dst []byte, fmt string, t time.Time) []byte {
	var parser parser

	// Ordinary bytes are copied through unchanged.
	parser.literal = func(b byte) error {
		dst = append(dst, b)
		return nil
	}

	parser.format = func(spec, flag byte) error {
		// Specifiers needing computed values are handled directly; the
		// rest fall through to a Go-layout translation below.
		switch spec {
		case 'A':
			dst = append(dst, t.Weekday().String()...)
			return nil
		case 'a':
			dst = append(dst, t.Weekday().String()[:3]...)
			return nil
		case 'B':
			dst = append(dst, t.Month().String()...)
			return nil
		case 'b', 'h':
			dst = append(dst, t.Month().String()[:3]...)
			return nil
		case 'm':
			dst = appendInt2(dst, int(t.Month()), flag)
			return nil
		case 'd':
			dst = appendInt2(dst, int(t.Day()), flag)
			return nil
		case 'e':
			dst = appendInt2(dst, int(t.Day()), ' ')
			return nil
		case 'I':
			dst = append12Hour(dst, t, flag)
			return nil
		case 'l':
			dst = append12Hour(dst, t, ' ')
			return nil
		case 'H':
			dst = appendInt2(dst, t.Hour(), flag)
			return nil
		case 'k':
			dst = appendInt2(dst, t.Hour(), ' ')
			return nil
		case 'M':
			dst = appendInt2(dst, t.Minute(), flag)
			return nil
		case 'S':
			dst = appendInt2(dst, t.Second(), flag)
			return nil
		case 'L':
			// Fractional seconds: format with a leading dot, then drop it.
			dst = append(dst, t.Format(".000")[1:]...)
			return nil
		case 'f':
			dst = append(dst, t.Format(".000000")[1:]...)
			return nil
		case 'N':
			dst = append(dst, t.Format(".000000000")[1:]...)
			return nil
		case 'y':
			dst = t.AppendFormat(dst, "06")
			return nil
		case 'Y':
			dst = t.AppendFormat(dst, "2006")
			return nil
		case 'C':
			// Century: format the 4-digit year, cut the last two digits.
			dst = t.AppendFormat(dst, "2006")
			dst = dst[:len(dst)-2]
			return nil
		case 'U':
			dst = appendWeekNumber(dst, t, flag, true)
			return nil
		case 'W':
			dst = appendWeekNumber(dst, t, flag, false)
			return nil
		case 'V':
			_, w := t.ISOWeek()
			dst = appendInt2(dst, w, flag)
			return nil
		case 'g':
			y, _ := t.ISOWeek()
			dst = year(y).AppendFormat(dst, "06")
			return nil
		case 'G':
			y, _ := t.ISOWeek()
			dst = year(y).AppendFormat(dst, "2006")
			return nil
		case 's':
			dst = strconv.AppendInt(dst, t.Unix(), 10)
			return nil
		case 'Q':
			dst = strconv.AppendInt(dst, t.UnixMilli(), 10)
			return nil
		case 'w':
			// Day of week, Sunday = 0.
			w := t.Weekday()
			dst = appendInt1(dst, int(w))
			return nil
		case 'u':
			// ISO weekday: Monday = 1 .. Sunday = 7.
			if w := t.Weekday(); w == 0 {
				dst = append(dst, '7')
			} else {
				dst = appendInt1(dst, int(w))
			}
			return nil
		case 'j':
			if flag == '-' {
				dst = strconv.AppendInt(dst, int64(t.YearDay()), 10)
			} else {
				dst = t.AppendFormat(dst, "002")
			}
			return nil
		}

		// Specifiers with a direct Go layout equivalent.
		if layout := goLayout(spec, flag, false); layout != "" {
			dst = t.AppendFormat(dst, layout)
			return nil
		}

		// Unknown specifier: emit it verbatim, including its flag.
		dst = append(dst, '%')
		if flag != 0 {
			dst = append(dst, flag)
		}
		dst = append(dst, spec)
		return nil
	}

	// Both callbacks always return nil, so parse cannot fail here and
	// its error is deliberately ignored.
	parser.parse(fmt)
	return dst
}
|
||||
|
||||
// Parse converts a textual representation of time to the time value it
// represents according to the strptime format specification fmt.
//
// The following specifiers are not supported for parsing:
//
//	%g %k %l %s %u %w %C %G %Q %U %V %W
//
// You must also avoid digits and these letter sequences
// in fmt literals:
//
//	Jan Mon MST PM pm
func Parse(fmt, value string) (time.Time, error) {
	// Translate the strftime format into a Go layout, then delegate to
	// the standard library parser.
	pattern, err := layout(fmt, true)
	if err != nil {
		return time.Time{}, err
	}
	return time.Parse(pattern, value)
}
|
||||
|
||||
// Layout converts a strftime format specification
// to a Go time pattern specification.
//
// The following specifiers are not supported by Go patterns:
//
//	%f %g %k %l %s %u %w %C %G %L %N %Q %U %V %W
//
// You must also avoid digits and these letter sequences
// in fmt literals:
//
//	Jan Mon MST PM pm
func Layout(fmt string) (string, error) {
	// Shared implementation with Parse; parsing=false selects the
	// zero-padded formatting layouts.
	return layout(fmt, false)
}
|
||||
|
||||
// layout translates a strftime format into a Go time layout. When
// parsing is true the layout is built for time.Parse (unpadded numeric
// forms); otherwise for formatting. It rejects literal digits and the
// literal sequences Go layouts would misinterpret (Jan, Mon, MST, PM,
// pm).
func layout(fmt string, parsing bool) (string, error) {
	dst := buffer(fmt)
	var parser parser

	parser.literal = func(b byte) error {
		// Literal digits would be read as layout numbers by Go.
		if '0' <= b && b <= '9' {
			return literalErr(b)
		}
		dst = append(dst, b)
		// Cheap suffix check, gated on the final byte of each forbidden
		// sequence, catching literals Go would treat as layout tokens.
		if b == 'M' || b == 'T' || b == 'm' || b == 'n' {
			switch {
			case bytes.HasSuffix(dst, []byte("Jan")):
				return literalErr("Jan")
			case bytes.HasSuffix(dst, []byte("Mon")):
				return literalErr("Mon")
			case bytes.HasSuffix(dst, []byte("MST")):
				return literalErr("MST")
			case bytes.HasSuffix(dst, []byte("PM")):
				return literalErr("PM")
			case bytes.HasSuffix(dst, []byte("pm")):
				return literalErr("pm")
			}
		}
		return nil
	}

	parser.format = func(spec, flag byte) error {
		if layout := goLayout(spec, flag, parsing); layout != "" {
			dst = append(dst, layout...)
			return nil
		}

		switch spec {
		default:
			return formatError{}

		case 'L', 'f', 'N':
			// Fractional seconds are only valid in a Go layout directly
			// after a decimal separator.
			if bytes.HasSuffix(dst, []byte(".")) || bytes.HasSuffix(dst, []byte(",")) {
				switch spec {
				default:
					dst = append(dst, "000"...)
				case 'f':
					dst = append(dst, "000000"...)
				case 'N':
					dst = append(dst, "000000000"...)
				}
				return nil
			}
			return formatError{message: "must follow '.' or ','"}
		}
	}

	if err := parser.parse(fmt); err != nil {
		return "", err
	}
	return string(dst), nil
}
|
||||
|
||||
// UTS35 converts a strftime format specification
// to a Unicode Technical Standard #35 Date Format Pattern.
//
// The following specifiers are not supported by UTS35:
//
//	%e %k %l %u %w %C %P %U %W
func UTS35(fmt string) (string, error) {
	const quote = '\''
	var quoted bool // true while inside a quoted literal run
	dst := buffer(fmt)

	var parser parser

	parser.literal = func(b byte) error {
		// A literal apostrophe is emitted as a doubled quote.
		if b == quote {
			dst = append(dst, quote, quote)
			return nil
		}
		// ASCII letters are pattern characters in UTS#35, so literal
		// letters must be wrapped in quotes.
		if !quoted && ('a' <= b && b <= 'z' || 'A' <= b && b <= 'Z') {
			dst = append(dst, quote)
			quoted = true
		}
		dst = append(dst, b)
		return nil
	}

	parser.format = func(spec, flag byte) error {
		// Close any open quoted run before emitting pattern characters.
		if quoted {
			dst = append(dst, quote)
			quoted = false
		}
		if pattern := uts35Pattern(spec, flag); pattern != "" {
			dst = append(dst, pattern...)
			return nil
		}
		return formatError{}
	}

	if err := parser.parse(fmt); err != nil {
		return "", err
	}
	// Terminate a quoted run left open at end of input.
	if quoted {
		dst = append(dst, quote)
	}
	return string(dst), nil
}
|
||||
|
||||
// buffer returns an empty byte slice sized to hold a converted copy of
// format plus a little headroom, using a fixed 64-byte backing array
// for short inputs.
func buffer(format string) []byte {
	const minCap = 64
	need := len(format) + 10
	if need >= minCap {
		return make([]byte, 0, need)
	}
	var scratch [minCap]byte
	return scratch[:0]
}
|
||||
|
||||
// year returns midnight UTC on January 1st of year y.
func year(y int) time.Time {
	firstOfJanuary := time.Date(y, time.January, 1, 0, 0, 0, 0, time.UTC)
	return firstOfJanuary
}
|
||||
|
||||
// appendWeekNumber appends the two-character week-of-year number of t
// to dst via appendInt2 with the given padding flag. When sunday is
// true weeks are counted Sunday-first, otherwise Monday-first.
func appendWeekNumber(dst []byte, t time.Time, flag byte, sunday bool) []byte {
	// offset shifts YearDay so that integer division by 7 lands on the
	// correct week boundary for the chosen first day of the week.
	offset := int(t.Weekday())
	if sunday {
		offset = 6 - offset
	} else if offset != 0 {
		offset = 7 - offset
	}
	return appendInt2(dst, (t.YearDay()+offset)/7, flag)
}
|
||||
|
||||
func append12Hour(dst []byte, t time.Time, flag byte) []byte {
|
||||
h := t.Hour()
|
||||
if h == 0 {
|
||||
h = 12
|
||||
} else if h > 12 {
|
||||
h -= 12
|
||||
}
|
||||
return appendInt2(dst, h, flag)
|
||||
}
|
||||
|
||||
// appendInt1 appends the single decimal digit i (0-9) to dst.
func appendInt1(dst []byte, i int) []byte {
	digit := '0' + byte(i)
	return append(dst, digit)
}
|
||||
|
||||
func appendInt2(dst []byte, i int, flag byte) []byte {
|
||||
if flag == 0 || i >= 10 {
|
||||
return append(dst, smallsString[i*2:i*2+2]...)
|
||||
}
|
||||
if flag == ' ' {
|
||||
dst = append(dst, flag)
|
||||
}
|
||||
return appendInt1(dst, i)
|
||||
}
|
||||
|
||||
// smallsString is a lookup table of the two-digit decimal strings
// "00" through "99"; digit pair i lives at smallsString[i*2 : i*2+2].
const smallsString = "" +
	"00010203040506070809" +
	"10111213141516171819" +
	"20212223242526272829" +
	"30313233343536373839" +
	"40414243444546474849" +
	"50515253545556575859" +
	"60616263646566676869" +
	"70717273747576777879" +
	"80818283848586878889" +
	"90919293949596979899"
|
||||
4
vendor/github.com/oschwald/maxminddb-golang/.gitignore
generated
vendored
Normal file
4
vendor/github.com/oschwald/maxminddb-golang/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
.vscode
|
||||
*.out
|
||||
*.sw?
|
||||
*.test
|
||||
3
vendor/github.com/oschwald/maxminddb-golang/.gitmodules
generated
vendored
Normal file
3
vendor/github.com/oschwald/maxminddb-golang/.gitmodules
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
[submodule "test-data"]
|
||||
path = test-data
|
||||
url = https://github.com/maxmind/MaxMind-DB.git
|
||||
192
vendor/github.com/oschwald/maxminddb-golang/.golangci.toml
generated
vendored
Normal file
192
vendor/github.com/oschwald/maxminddb-golang/.golangci.toml
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
[run]
|
||||
# This is needed for precious, which may run multiple instances
|
||||
# in parallel
|
||||
allow-parallel-runners = true
|
||||
go = "1.21"
|
||||
tests = true
|
||||
timeout = "10m"
|
||||
|
||||
[linters]
|
||||
enable-all = true
|
||||
disable = [
|
||||
"cyclop",
|
||||
"depguard",
|
||||
"err113",
|
||||
"execinquery",
|
||||
"exhaustive",
|
||||
"exhaustruct",
|
||||
"forcetypeassert",
|
||||
"funlen",
|
||||
"gochecknoglobals",
|
||||
"godox",
|
||||
"gomnd",
|
||||
"inamedparam",
|
||||
"interfacebloat",
|
||||
"mnd",
|
||||
"nlreturn",
|
||||
"nonamedreturns",
|
||||
"paralleltest",
|
||||
"thelper",
|
||||
"testpackage",
|
||||
|
||||
"varnamelen",
|
||||
"wrapcheck",
|
||||
"wsl",
|
||||
|
||||
# Require Go 1.22
|
||||
"copyloopvar",
|
||||
"intrange",
|
||||
]
|
||||
|
||||
[linters-settings.errorlint]
|
||||
errorf = true
|
||||
asserts = true
|
||||
comparison = true
|
||||
|
||||
[linters-settings.exhaustive]
|
||||
default-signifies-exhaustive = true
|
||||
|
||||
[linters-settings.forbidigo]
|
||||
# Forbid the following identifiers
|
||||
forbid = [
|
||||
{ p = "Geoip", msg = "you should use `GeoIP`" },
|
||||
{ p = "geoIP", msg = "you should use `geoip`" },
|
||||
{ p = "Maxmind", msg = "you should use `MaxMind`" },
|
||||
{ p = "^maxMind", msg = "you should use `maxmind`" },
|
||||
{ p = "Minfraud", msg = "you should use `MinFraud`" },
|
||||
{ p = "^minFraud", msg = "you should use `minfraud`" },
|
||||
{ p = "^math.Max$", msg = "you should use the max built-in instead." },
|
||||
{ p = "^math.Min$", msg = "you should use the min built-in instead." },
|
||||
{ p = "^os.IsNotExist", msg = "As per their docs, new code should use errors.Is(err, fs.ErrNotExist)." },
|
||||
{ p = "^os.IsExist", msg = "As per their docs, new code should use errors.Is(err, fs.ErrExist)" },
|
||||
]
|
||||
|
||||
[linters-settings.gci]
|
||||
sections = ["standard", "default", "prefix(github.com/oschwald/maxminddb-golang)"]
|
||||
|
||||
[linters-settings.gofumpt]
|
||||
extra-rules = true
|
||||
|
||||
[linters-settings.govet]
|
||||
enable-all = true
|
||||
disable = "shadow"
|
||||
|
||||
[linters-settings.lll]
|
||||
line-length = 120
|
||||
tab-width = 4
|
||||
|
||||
[linters-settings.misspell]
|
||||
locale = "US"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "marshall"
|
||||
correction = "marshal"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "marshalling"
|
||||
correction = "marshaling"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "marshalls"
|
||||
correction = "marshals"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "unmarshall"
|
||||
correction = "unmarshal"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "unmarshalling"
|
||||
correction = "unmarshaling"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "unmarshalls"
|
||||
correction = "unmarshals"
|
||||
|
||||
[linters-settings.nolintlint]
|
||||
allow-unused = false
|
||||
allow-no-explanation = ["lll", "misspell"]
|
||||
require-explanation = true
|
||||
require-specific = true
|
||||
|
||||
[linters-settings.revive]
|
||||
enable-all-rules = true
|
||||
ignore-generated-header = true
|
||||
severity = "warning"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "add-constant"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "cognitive-complexity"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "confusing-naming"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "confusing-results"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "cyclomatic"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "deep-exit"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "flag-parameter"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "function-length"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "function-result-limit"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "line-length-limit"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "max-public-structs"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "nested-structs"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unchecked-type-assertion"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unhandled-error"
|
||||
disabled = true
|
||||
|
||||
[linters-settings.tagliatelle.case.rules]
|
||||
avro = "snake"
|
||||
bson = "snake"
|
||||
env = "upperSnake"
|
||||
envconfig = "upperSnake"
|
||||
json = "snake"
|
||||
mapstructure = "snake"
|
||||
xml = "snake"
|
||||
yaml = "snake"
|
||||
|
||||
[linters-settings.unparam]
|
||||
check-exported = true
|
||||
|
||||
|
||||
[[issues.exclude-rules]]
|
||||
linters = [
|
||||
"govet",
|
||||
"revive",
|
||||
]
|
||||
path = "_test.go"
|
||||
text = "fieldalignment:"
|
||||
15
vendor/github.com/oschwald/maxminddb-golang/LICENSE
generated
vendored
Normal file
15
vendor/github.com/oschwald/maxminddb-golang/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
||||
36
vendor/github.com/oschwald/maxminddb-golang/README.md
generated
vendored
Normal file
36
vendor/github.com/oschwald/maxminddb-golang/README.md
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
# MaxMind DB Reader for Go #
|
||||
|
||||
[](https://godoc.org/github.com/oschwald/maxminddb-golang)
|
||||
|
||||
This is a Go reader for the MaxMind DB format. Although this can be used to
|
||||
read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
|
||||
[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
|
||||
[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
|
||||
API for doing so.
|
||||
|
||||
This is not an official MaxMind API.
|
||||
|
||||
## Installation ##
|
||||
|
||||
```
|
||||
go get github.com/oschwald/maxminddb-golang
|
||||
```
|
||||
|
||||
## Usage ##
|
||||
|
||||
[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
|
||||
documentation and examples.
|
||||
|
||||
## Examples ##
|
||||
|
||||
See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
|
||||
`example_test.go` for examples.
|
||||
|
||||
## Contributing ##
|
||||
|
||||
Contributions welcome! Please fork the repository and open a pull request
|
||||
with your changes.
|
||||
|
||||
## License ##
|
||||
|
||||
This is free software, licensed under the ISC License.
|
||||
900
vendor/github.com/oschwald/maxminddb-golang/decoder.go
generated
vendored
Normal file
900
vendor/github.com/oschwald/maxminddb-golang/decoder.go
generated
vendored
Normal file
@@ -0,0 +1,900 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// decoder decodes values from the data section of a MaxMind DB.
type decoder struct {
	// buffer holds the raw data-section bytes; all offsets passed to
	// the decode methods index into it.
	buffer []byte
}
|
||||
|
||||
// dataType identifies the on-disk type of a value in the data section,
// as encoded in the top three bits of a control byte (extended types
// continue in the following byte).
type dataType int

const (
	_Extended dataType = iota
	_Pointer
	_String
	_Float64
	_Bytes
	_Uint16
	_Uint32
	_Map
	_Int32
	_Uint64
	_Uint128
	_Slice
	// We don't use the next two. They are placeholders. See the spec
	// for more details.
	_Container //nolint: deadcode, varcheck // above
	_Marker    //nolint: deadcode, varcheck // above
	_Bool
	_Float32
)
|
||||
|
||||
const (
	// maximumDataStructureDepth bounds recursion while decoding so a
	// corrupt database cannot drive unbounded nesting.
	// This is the value used in libmaxminddb.
	maximumDataStructureDepth = 512
)
|
||||
|
||||
// decode decodes the value at offset into result, returning the offset
// of the next value. depth guards against runaway recursion in corrupt
// databases.
func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
	if depth > maximumDataStructureDepth {
		return 0, newInvalidDatabaseError(
			"exceeded maximum data structure depth; database is likely corrupt",
		)
	}
	typeNum, size, newOffset, err := d.decodeCtrlData(offset)
	if err != nil {
		return 0, err
	}

	// A uintptr destination captures the value's raw offset instead of
	// decoding it (pointers are still followed first).
	if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
		result.Set(reflect.ValueOf(uintptr(offset)))
		return d.nextValueOffset(offset, 1)
	}
	return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
}
||||
|
||||
// decodeToDeserializer streams the value at offset into dser instead of
// reflecting it into a Go value. dser may elect to skip the value; when
// getNext is true a skipped value still advances to the next offset.
func (d *decoder) decodeToDeserializer(
	offset uint,
	dser deserializer,
	depth int,
	getNext bool,
) (uint, error) {
	if depth > maximumDataStructureDepth {
		return 0, newInvalidDatabaseError(
			"exceeded maximum data structure depth; database is likely corrupt",
		)
	}
	skip, err := dser.ShouldSkip(uintptr(offset))
	if err != nil {
		return 0, err
	}
	if skip {
		if getNext {
			return d.nextValueOffset(offset, 1)
		}
		return 0, nil
	}

	typeNum, size, newOffset, err := d.decodeCtrlData(offset)
	if err != nil {
		return 0, err
	}

	return d.decodeFromTypeToDeserializer(typeNum, size, newOffset, dser, depth+1)
}
|
||||
|
||||
// decodeCtrlData reads the control byte at offset and returns the data
// type, the decoded size value, and the offset of the payload bytes.
func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
	newOffset := offset + 1
	if offset >= uint(len(d.buffer)) {
		return 0, 0, 0, newOffsetError()
	}
	ctrlByte := d.buffer[offset]

	// The top three bits select the type; 0 means the real type is
	// stored in the next byte, offset by 7 (the extended-type scheme).
	typeNum := dataType(ctrlByte >> 5)
	if typeNum == _Extended {
		if newOffset >= uint(len(d.buffer)) {
			return 0, 0, 0, newOffsetError()
		}
		typeNum = dataType(d.buffer[newOffset] + 7)
		newOffset++
	}

	var size uint
	size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
	return typeNum, size, newOffset, err
}
|
||||
|
||||
// sizeFromCtrlByte decodes the payload size held in the low five bits
// of ctrlByte. Five-bit values 29, 30 and 31 signal that 1, 2 or 3
// additional length bytes follow at offset; extended types use the raw
// five-bit value as-is. It returns the size and the offset after any
// length bytes consumed.
func (d *decoder) sizeFromCtrlByte(
	ctrlByte byte,
	offset uint,
	typeNum dataType,
) (uint, uint, error) {
	size := uint(ctrlByte & 0x1f)
	if typeNum == _Extended {
		return size, offset, nil
	}

	var bytesToRead uint
	if size < 29 {
		return size, offset, nil
	}

	bytesToRead = size - 28
	newOffset := offset + bytesToRead
	if newOffset > uint(len(d.buffer)) {
		return 0, 0, newOffsetError()
	}
	if size == 29 {
		// One extra byte, biased by 29.
		return 29 + uint(d.buffer[offset]), offset + 1, nil
	}

	sizeBytes := d.buffer[offset:newOffset]

	switch {
	case size == 30:
		// Two extra bytes, biased by 285 (= 29 + 256).
		size = 285 + uintFromBytes(0, sizeBytes)
	case size > 30:
		// Three extra bytes, biased by 65821 (= 285 + 65536).
		size = uintFromBytes(0, sizeBytes) + 65821
	}
	return size, newOffset, nil
}
|
||||
|
||||
// decodeFromType dispatches on dtype to decode the payload at offset
// into result, returning the offset of the following value.
func (d *decoder) decodeFromType(
	dtype dataType,
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	result = indirect(result)

	// For these types, size has a special meaning
	switch dtype {
	case _Bool:
		return unmarshalBool(size, offset, result)
	case _Map:
		return d.unmarshalMap(size, offset, result, depth)
	case _Pointer:
		return d.unmarshalPointer(size, offset, result, depth)
	case _Slice:
		return d.unmarshalSlice(size, offset, result, depth)
	}

	// For the remaining types, size is the byte size
	if offset+size > uint(len(d.buffer)) {
		return 0, newOffsetError()
	}
	switch dtype {
	case _Bytes:
		return d.unmarshalBytes(size, offset, result)
	case _Float32:
		return d.unmarshalFloat32(size, offset, result)
	case _Float64:
		return d.unmarshalFloat64(size, offset, result)
	case _Int32:
		return d.unmarshalInt32(size, offset, result)
	case _String:
		return d.unmarshalString(size, offset, result)
	case _Uint16:
		return d.unmarshalUint(size, offset, result, 16)
	case _Uint32:
		return d.unmarshalUint(size, offset, result, 32)
	case _Uint64:
		return d.unmarshalUint(size, offset, result, 64)
	case _Uint128:
		return d.unmarshalUint128(size, offset, result)
	default:
		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
	}
}
|
||||
|
||||
// decodeFromTypeToDeserializer dispatches on dtype to stream the
// payload at offset into dser, returning the offset of the following
// value.
func (d *decoder) decodeFromTypeToDeserializer(
	dtype dataType,
	size uint,
	offset uint,
	dser deserializer,
	depth int,
) (uint, error) {
	// For these types, size has a special meaning
	switch dtype {
	case _Bool:
		v, offset := decodeBool(size, offset)
		return offset, dser.Bool(v)
	case _Map:
		return d.decodeMapToDeserializer(size, offset, dser, depth)
	case _Pointer:
		pointer, newOffset, err := d.decodePointer(size, offset)
		if err != nil {
			return 0, err
		}
		// Follow the pointer, but resume after the pointer itself.
		_, err = d.decodeToDeserializer(pointer, dser, depth, false)
		return newOffset, err
	case _Slice:
		return d.decodeSliceToDeserializer(size, offset, dser, depth)
	}

	// For the remaining types, size is the byte size
	if offset+size > uint(len(d.buffer)) {
		return 0, newOffsetError()
	}
	switch dtype {
	case _Bytes:
		v, offset := d.decodeBytes(size, offset)
		return offset, dser.Bytes(v)
	case _Float32:
		v, offset := d.decodeFloat32(size, offset)
		return offset, dser.Float32(v)
	case _Float64:
		v, offset := d.decodeFloat64(size, offset)
		return offset, dser.Float64(v)
	case _Int32:
		v, offset := d.decodeInt(size, offset)
		return offset, dser.Int32(int32(v))
	case _String:
		v, offset := d.decodeString(size, offset)
		return offset, dser.String(v)
	case _Uint16:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint16(uint16(v))
	case _Uint32:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint32(uint32(v))
	case _Uint64:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint64(v)
	case _Uint128:
		v, offset := d.decodeUint128(size, offset)
		return offset, dser.Uint128(v)
	default:
		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
	}
}
|
||||
|
||||
// unmarshalBool decodes a boolean (encoded entirely in size, no payload
// bytes) into result, which must be a bool or an empty interface.
func unmarshalBool(size, offset uint, result reflect.Value) (uint, error) {
	if size > 1 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (bool size of %v)",
			size,
		)
	}
	value, newOffset := decodeBool(size, offset)

	switch result.Kind() {
	case reflect.Bool:
		result.SetBool(value)
		return newOffset, nil
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// indirect follows pointers and create values as necessary. This is
// heavily based on encoding/json as my original version had a subtle
// bug. This method should be considered to be licensed under
// https://golang.org/LICENSE
func indirect(result reflect.Value) reflect.Value {
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if result.Kind() == reflect.Interface && !result.IsNil() {
			e := result.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() {
				result = e
				continue
			}
		}

		if result.Kind() != reflect.Ptr {
			break
		}

		// Allocate through nil pointers so the target is settable.
		if result.IsNil() {
			result.Set(reflect.New(result.Type().Elem()))
		}

		result = result.Elem()
	}
	return result
}
|
||||
|
||||
// sliceType is the reflect.Type of []byte, used to recognize byte-slice
// destinations in unmarshalBytes.
var sliceType = reflect.TypeOf([]byte{})
|
||||
|
||||
// unmarshalBytes decodes a byte string of the given size at offset into
// result, which must be a []byte or an empty interface.
func (d *decoder) unmarshalBytes(size, offset uint, result reflect.Value) (uint, error) {
	value, newOffset := d.decodeBytes(size, offset)

	switch result.Kind() {
	case reflect.Slice:
		if result.Type() == sliceType {
			result.SetBytes(value)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalFloat32 decodes a 4-byte IEEE 754 float at offset into
// result, which must be a float kind or an empty interface.
func (d *decoder) unmarshalFloat32(size, offset uint, result reflect.Value) (uint, error) {
	if size != 4 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (float32 size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeFloat32(size, offset)

	switch result.Kind() {
	case reflect.Float32, reflect.Float64:
		result.SetFloat(float64(value))
		return newOffset, nil
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalFloat64 decodes an 8-byte IEEE 754 float at offset into
// result, which must be a float kind (checked for overflow into
// float32) or an empty interface.
func (d *decoder) unmarshalFloat64(size, offset uint, result reflect.Value) (uint, error) {
	if size != 8 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (float 64 size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeFloat64(size, offset)

	switch result.Kind() {
	case reflect.Float32, reflect.Float64:
		if result.OverflowFloat(value) {
			return 0, newUnmarshalTypeError(value, result.Type())
		}
		result.SetFloat(value)
		return newOffset, nil
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalInt32 decodes a signed 32-bit integer (up to 4 bytes) at
// offset into result, which may be any integer kind that the value fits
// in, or an empty interface.
func (d *decoder) unmarshalInt32(size, offset uint, result reflect.Value) (uint, error) {
	if size > 4 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (int32 size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeInt(size, offset)

	switch result.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		n := int64(value)
		if !result.OverflowInt(n) {
			result.SetInt(n)
			return newOffset, nil
		}
	case reflect.Uint,
		reflect.Uint8,
		reflect.Uint16,
		reflect.Uint32,
		reflect.Uint64,
		reflect.Uintptr:
		n := uint64(value)
		if !result.OverflowUint(n) {
			result.SetUint(n)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalMap decodes a map of size key/value pairs at offset into
// result, which must be a struct, a map, or an empty interface (which
// receives a map[string]any).
func (d *decoder) unmarshalMap(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	result = indirect(result)
	switch result.Kind() {
	default:
		return 0, newUnmarshalTypeStrError("map", result.Type())
	case reflect.Struct:
		return d.decodeStruct(size, offset, result, depth)
	case reflect.Map:
		return d.decodeMap(size, offset, result, depth)
	case reflect.Interface:
		if result.NumMethod() == 0 {
			rv := reflect.ValueOf(make(map[string]any, size))
			newOffset, err := d.decodeMap(size, offset, rv, depth)
			result.Set(rv)
			return newOffset, err
		}
		return 0, newUnmarshalTypeStrError("map", result.Type())
	}
}
|
||||
|
||||
// unmarshalPointer follows a data-section pointer at offset, decodes
// the pointed-to value into result, and returns the offset after the
// pointer itself.
func (d *decoder) unmarshalPointer(
	size, offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	pointer, newOffset, err := d.decodePointer(size, offset)
	if err != nil {
		return 0, err
	}
	_, err = d.decode(pointer, result, depth)
	return newOffset, err
}
|
||||
|
||||
// unmarshalSlice decodes an array of size elements at offset into
// result, which must be a slice or an empty interface (which receives
// a []any).
func (d *decoder) unmarshalSlice(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	switch result.Kind() {
	case reflect.Slice:
		return d.decodeSlice(size, offset, result, depth)
	case reflect.Interface:
		if result.NumMethod() == 0 {
			a := []any{}
			rv := reflect.ValueOf(&a).Elem()
			newOffset, err := d.decodeSlice(size, offset, rv, depth)
			result.Set(rv)
			return newOffset, err
		}
	}
	return 0, newUnmarshalTypeStrError("array", result.Type())
}
|
||||
|
||||
// unmarshalString decodes a string of the given byte size at offset
// into result, which must be a string or an empty interface.
func (d *decoder) unmarshalString(size, offset uint, result reflect.Value) (uint, error) {
	value, newOffset := d.decodeString(size, offset)

	switch result.Kind() {
	case reflect.String:
		result.SetString(value)
		return newOffset, nil
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalUint decodes an unsigned integer at offset into result.
// uintType is the declared bit width (16, 32 or 64) and bounds the
// permitted byte size; result may be any integer kind the value fits
// in, or an empty interface.
func (d *decoder) unmarshalUint(
	size, offset uint,
	result reflect.Value,
	uintType uint,
) (uint, error) {
	if size > uintType/8 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (uint%v size of %v)",
			uintType,
			size,
		)
	}

	value, newOffset := d.decodeUint(size, offset)

	switch result.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		n := int64(value)
		if !result.OverflowInt(n) {
			result.SetInt(n)
			return newOffset, nil
		}
	case reflect.Uint,
		reflect.Uint8,
		reflect.Uint16,
		reflect.Uint32,
		reflect.Uint64,
		reflect.Uintptr:
		if !result.OverflowUint(value) {
			result.SetUint(value)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// bigIntType is the reflect.Type of big.Int, used to recognize big.Int
// destinations in unmarshalUint128.
var bigIntType = reflect.TypeOf(big.Int{})
|
||||
|
||||
// unmarshalUint128 decodes an unsigned 128-bit integer (up to 16 bytes)
// at offset into result, which must be a big.Int or an empty interface
// (which receives a *big.Int).
func (d *decoder) unmarshalUint128(size, offset uint, result reflect.Value) (uint, error) {
	if size > 16 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (uint128 size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeUint128(size, offset)

	switch result.Kind() {
	case reflect.Struct:
		if result.Type() == bigIntType {
			result.Set(reflect.ValueOf(*value))
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// decodeBool interprets the control size as a boolean: size 0 encodes
// false, anything else true. No payload bytes are consumed, so offset
// is returned unchanged.
func decodeBool(size, offset uint) (bool, uint) {
	value := size > 0
	return value, offset
}
|
||||
|
||||
// decodeBytes copies size bytes starting at offset out of the buffer
// (so the result does not alias the underlying data) and returns them
// with the following offset.
func (d *decoder) decodeBytes(size, offset uint) ([]byte, uint) {
	newOffset := offset + size
	bytes := make([]byte, size)
	copy(bytes, d.buffer[offset:newOffset])
	return bytes, newOffset
}
|
||||
|
||||
// decodeFloat64 reads a big-endian IEEE 754 double at offset.
func (d *decoder) decodeFloat64(size, offset uint) (float64, uint) {
	newOffset := offset + size
	bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
	return math.Float64frombits(bits), newOffset
}
|
||||
|
||||
// decodeFloat32 reads a big-endian IEEE 754 single at offset.
func (d *decoder) decodeFloat32(size, offset uint) (float32, uint) {
	newOffset := offset + size
	bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
	return math.Float32frombits(bits), newOffset
}
|
||||
|
||||
// decodeInt accumulates size big-endian bytes at offset into an int32
// and returns it (widened to int) with the following offset.
func (d *decoder) decodeInt(size, offset uint) (int, uint) {
	newOffset := offset + size
	var val int32
	for _, b := range d.buffer[offset:newOffset] {
		val = (val << 8) | int32(b)
	}
	return int(val), newOffset
}
|
||||
|
||||
// decodeMap decodes size key/value pairs at offset into the map value
// result, allocating the map if it is nil. A single element value is
// reused (zeroed) across iterations to limit allocations.
func (d *decoder) decodeMap(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	if result.IsNil() {
		result.Set(reflect.MakeMapWithSize(result.Type(), int(size)))
	}

	mapType := result.Type()
	keyValue := reflect.New(mapType.Key()).Elem()
	elemType := mapType.Elem()
	var elemValue reflect.Value
	for i := uint(0); i < size; i++ {
		var key []byte
		var err error
		key, offset, err = d.decodeKey(offset)
		if err != nil {
			return 0, err
		}

		if elemValue.IsValid() {
			// After 1.20 is the minimum supported version, this can just be
			// elemValue.SetZero()
			reflectSetZero(elemValue)
		} else {
			elemValue = reflect.New(elemType).Elem()
		}

		offset, err = d.decode(offset, elemValue, depth)
		if err != nil {
			return 0, fmt.Errorf("decoding value for %s: %w", key, err)
		}

		keyValue.SetString(string(key))
		result.SetMapIndex(keyValue, elemValue)
	}
	return offset, nil
}
|
||||
|
||||
// decodeMapToDeserializer streams a map of size key/value pairs at
// offset into dser, bracketed by StartMap/End calls.
func (d *decoder) decodeMapToDeserializer(
	size uint,
	offset uint,
	dser deserializer,
	depth int,
) (uint, error) {
	err := dser.StartMap(size)
	if err != nil {
		return 0, err
	}
	for i := uint(0); i < size; i++ {
		// TODO - implement key/value skipping?
		offset, err = d.decodeToDeserializer(offset, dser, depth, true)
		if err != nil {
			return 0, err
		}

		offset, err = d.decodeToDeserializer(offset, dser, depth, true)
		if err != nil {
			return 0, err
		}
	}
	err = dser.End()
	if err != nil {
		return 0, err
	}
	return offset, nil
}
|
||||
|
||||
// decodePointer decodes a data-section pointer. The pointer width (1-4
// bytes) is encoded in bits 3-4 of size; for widths below 4 the low
// three bits of size prefix the value, and each width adds a fixed base
// per the MaxMind DB spec. It returns the target offset and the offset
// after the pointer bytes.
func (d *decoder) decodePointer(
	size uint,
	offset uint,
) (uint, uint, error) {
	pointerSize := ((size >> 3) & 0x3) + 1
	newOffset := offset + pointerSize
	if newOffset > uint(len(d.buffer)) {
		return 0, 0, newOffsetError()
	}
	pointerBytes := d.buffer[offset:newOffset]
	var prefix uint
	if pointerSize == 4 {
		prefix = 0
	} else {
		prefix = size & 0x7
	}
	unpacked := uintFromBytes(prefix, pointerBytes)

	// Base offsets chain the ranges of the smaller widths together.
	var pointerValueOffset uint
	switch pointerSize {
	case 1:
		pointerValueOffset = 0
	case 2:
		pointerValueOffset = 2048
	case 3:
		pointerValueOffset = 526336
	case 4:
		pointerValueOffset = 0
	}

	pointer := unpacked + pointerValueOffset

	return pointer, newOffset, nil
}
|
||||
|
||||
// decodeSlice decodes size consecutive values at offset into a freshly
// made slice assigned to result.
func (d *decoder) decodeSlice(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
	for i := 0; i < int(size); i++ {
		var err error
		offset, err = d.decode(offset, result.Index(i), depth)
		if err != nil {
			return 0, err
		}
	}
	return offset, nil
}
|
||||
|
||||
func (d *decoder) decodeSliceToDeserializer(
|
||||
size uint,
|
||||
offset uint,
|
||||
dser deserializer,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
err := dser.StartSlice(size)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for i := uint(0); i < size; i++ {
|
||||
offset, err = d.decodeToDeserializer(offset, dser, depth, true)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
err = dser.End()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeString(size, offset uint) (string, uint) {
|
||||
newOffset := offset + size
|
||||
return string(d.buffer[offset:newOffset]), newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeStruct(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
fields := cachedFields(result)
|
||||
|
||||
// This fills in embedded structs
|
||||
for _, i := range fields.anonymousFields {
|
||||
_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// This handles named fields
|
||||
for i := uint(0); i < size; i++ {
|
||||
var (
|
||||
err error
|
||||
key []byte
|
||||
)
|
||||
key, offset, err = d.decodeKey(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// The string() does not create a copy due to this compiler
|
||||
// optimization: https://github.com/golang/go/issues/3512
|
||||
j, ok := fields.namedFields[string(key)]
|
||||
if !ok {
|
||||
offset, err = d.nextValueOffset(offset, 1)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
offset, err = d.decode(offset, result.Field(j), depth)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("decoding value for %s: %w", key, err)
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
type fieldsType struct {
|
||||
namedFields map[string]int
|
||||
anonymousFields []int
|
||||
}
|
||||
|
||||
var fieldsMap sync.Map
|
||||
|
||||
func cachedFields(result reflect.Value) *fieldsType {
|
||||
resultType := result.Type()
|
||||
|
||||
if fields, ok := fieldsMap.Load(resultType); ok {
|
||||
return fields.(*fieldsType)
|
||||
}
|
||||
numFields := resultType.NumField()
|
||||
namedFields := make(map[string]int, numFields)
|
||||
var anonymous []int
|
||||
for i := 0; i < numFields; i++ {
|
||||
field := resultType.Field(i)
|
||||
|
||||
fieldName := field.Name
|
||||
if tag := field.Tag.Get("maxminddb"); tag != "" {
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
fieldName = tag
|
||||
}
|
||||
if field.Anonymous {
|
||||
anonymous = append(anonymous, i)
|
||||
continue
|
||||
}
|
||||
namedFields[fieldName] = i
|
||||
}
|
||||
fields := &fieldsType{namedFields, anonymous}
|
||||
fieldsMap.Store(resultType, fields)
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
func (d *decoder) decodeUint(size, offset uint) (uint64, uint) {
|
||||
newOffset := offset + size
|
||||
bytes := d.buffer[offset:newOffset]
|
||||
|
||||
var val uint64
|
||||
for _, b := range bytes {
|
||||
val = (val << 8) | uint64(b)
|
||||
}
|
||||
return val, newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeUint128(size, offset uint) (*big.Int, uint) {
|
||||
newOffset := offset + size
|
||||
val := new(big.Int)
|
||||
val.SetBytes(d.buffer[offset:newOffset])
|
||||
|
||||
return val, newOffset
|
||||
}
|
||||
|
||||
func uintFromBytes(prefix uint, uintBytes []byte) uint {
|
||||
val := prefix
|
||||
for _, b := range uintBytes {
|
||||
val = (val << 8) | uint(b)
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// decodeKey decodes a map key into []byte slice. We use a []byte so that we
|
||||
// can take advantage of https://github.com/golang/go/issues/3512 to avoid
|
||||
// copying the bytes when decoding a struct. Previously, we achieved this by
|
||||
// using unsafe.
|
||||
func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
|
||||
typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if typeNum == _Pointer {
|
||||
pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
key, _, err := d.decodeKey(pointer)
|
||||
return key, ptrOffset, err
|
||||
}
|
||||
if typeNum != _String {
|
||||
return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
|
||||
}
|
||||
newOffset := dataOffset + size
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return nil, 0, newOffsetError()
|
||||
}
|
||||
return d.buffer[dataOffset:newOffset], newOffset, nil
|
||||
}
|
||||
|
||||
// This function is used to skip ahead to the next value without decoding
|
||||
// the one at the offset passed in. The size bits have different meanings for
|
||||
// different data types.
|
||||
func (d *decoder) nextValueOffset(offset, numberToSkip uint) (uint, error) {
|
||||
if numberToSkip == 0 {
|
||||
return offset, nil
|
||||
}
|
||||
typeNum, size, offset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch typeNum {
|
||||
case _Pointer:
|
||||
_, offset, err = d.decodePointer(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
case _Map:
|
||||
numberToSkip += 2 * size
|
||||
case _Slice:
|
||||
numberToSkip += size
|
||||
case _Bool:
|
||||
default:
|
||||
offset += size
|
||||
}
|
||||
return d.nextValueOffset(offset, numberToSkip-1)
|
||||
}
|
||||
31
vendor/github.com/oschwald/maxminddb-golang/deserializer.go
generated
vendored
Normal file
31
vendor/github.com/oschwald/maxminddb-golang/deserializer.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
package maxminddb
|
||||
|
||||
import "math/big"
|
||||
|
||||
// deserializer is an interface for a type that deserializes an MaxMind DB
|
||||
// data record to some other type. This exists as an alternative to the
|
||||
// standard reflection API.
|
||||
//
|
||||
// This is fundamentally different than the Unmarshaler interface that
|
||||
// several packages provide. A Deserializer will generally create the
|
||||
// final struct or value rather than unmarshaling to itself.
|
||||
//
|
||||
// This interface and the associated unmarshaling code is EXPERIMENTAL!
|
||||
// It is not currently covered by any Semantic Versioning guarantees.
|
||||
// Use at your own risk.
|
||||
type deserializer interface {
|
||||
ShouldSkip(offset uintptr) (bool, error)
|
||||
StartSlice(size uint) error
|
||||
StartMap(size uint) error
|
||||
End() error
|
||||
String(string) error
|
||||
Float64(float64) error
|
||||
Bytes([]byte) error
|
||||
Uint16(uint16) error
|
||||
Uint32(uint32) error
|
||||
Int32(int32) error
|
||||
Uint64(uint64) error
|
||||
Uint128(*big.Int) error
|
||||
Bool(bool) error
|
||||
Float32(float32) error
|
||||
}
|
||||
46
vendor/github.com/oschwald/maxminddb-golang/errors.go
generated
vendored
Normal file
46
vendor/github.com/oschwald/maxminddb-golang/errors.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// InvalidDatabaseError is returned when the database contains invalid data
|
||||
// and cannot be parsed.
|
||||
type InvalidDatabaseError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func newOffsetError() InvalidDatabaseError {
|
||||
return InvalidDatabaseError{"unexpected end of database"}
|
||||
}
|
||||
|
||||
func newInvalidDatabaseError(format string, args ...any) InvalidDatabaseError {
|
||||
return InvalidDatabaseError{fmt.Sprintf(format, args...)}
|
||||
}
|
||||
|
||||
func (e InvalidDatabaseError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// UnmarshalTypeError is returned when the value in the database cannot be
|
||||
// assigned to the specified data type.
|
||||
type UnmarshalTypeError struct {
|
||||
Type reflect.Type
|
||||
Value string
|
||||
}
|
||||
|
||||
func newUnmarshalTypeStrError(value string, rType reflect.Type) UnmarshalTypeError {
|
||||
return UnmarshalTypeError{
|
||||
Type: rType,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
|
||||
func newUnmarshalTypeError(value any, rType reflect.Type) UnmarshalTypeError {
|
||||
return newUnmarshalTypeStrError(fmt.Sprintf("%v (%T)", value, value), rType)
|
||||
}
|
||||
|
||||
func (e UnmarshalTypeError) Error() string {
|
||||
return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type)
|
||||
}
|
||||
16
vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
generated
vendored
Normal file
16
vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build !windows && !appengine && !plan9 && !js && !wasip1 && !wasi
|
||||
// +build !windows,!appengine,!plan9,!js,!wasip1,!wasi
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func mmap(fd, length int) (data []byte, err error) {
|
||||
return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
|
||||
}
|
||||
|
||||
func munmap(b []byte) (err error) {
|
||||
return unix.Munmap(b)
|
||||
}
|
||||
86
vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
generated
vendored
Normal file
86
vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
//go:build windows && !appengine
|
||||
// +build windows,!appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
// Windows support largely borrowed from mmap-go.
|
||||
//
|
||||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
type memoryMap []byte
|
||||
|
||||
// Windows
|
||||
var handleLock sync.Mutex
|
||||
var handleMap = map[uintptr]windows.Handle{}
|
||||
|
||||
func mmap(fd int, length int) (data []byte, err error) {
|
||||
h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
|
||||
uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
|
||||
if h == 0 {
|
||||
return nil, os.NewSyscallError("CreateFileMapping", errno)
|
||||
}
|
||||
|
||||
addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
|
||||
0, uintptr(length))
|
||||
if addr == 0 {
|
||||
return nil, os.NewSyscallError("MapViewOfFile", errno)
|
||||
}
|
||||
handleLock.Lock()
|
||||
handleMap[addr] = h
|
||||
handleLock.Unlock()
|
||||
|
||||
m := memoryMap{}
|
||||
dh := m.header()
|
||||
dh.Data = addr
|
||||
dh.Len = length
|
||||
dh.Cap = dh.Len
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *memoryMap) header() *reflect.SliceHeader {
|
||||
return (*reflect.SliceHeader)(unsafe.Pointer(m))
|
||||
}
|
||||
|
||||
func flush(addr, len uintptr) error {
|
||||
errno := windows.FlushViewOfFile(addr, len)
|
||||
return os.NewSyscallError("FlushViewOfFile", errno)
|
||||
}
|
||||
|
||||
func munmap(b []byte) (err error) {
|
||||
m := memoryMap(b)
|
||||
dh := m.header()
|
||||
|
||||
addr := dh.Data
|
||||
length := uintptr(dh.Len)
|
||||
|
||||
flush(addr, length)
|
||||
err = windows.UnmapViewOfFile(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handleLock.Lock()
|
||||
defer handleLock.Unlock()
|
||||
handle, ok := handleMap[addr]
|
||||
if !ok {
|
||||
// should be impossible; we would've errored above
|
||||
return errors.New("unknown base address")
|
||||
}
|
||||
delete(handleMap, addr)
|
||||
|
||||
e := windows.CloseHandle(windows.Handle(handle))
|
||||
return os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
58
vendor/github.com/oschwald/maxminddb-golang/node.go
generated
vendored
Normal file
58
vendor/github.com/oschwald/maxminddb-golang/node.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
package maxminddb
|
||||
|
||||
type nodeReader interface {
|
||||
readLeft(uint) uint
|
||||
readRight(uint) uint
|
||||
}
|
||||
|
||||
type nodeReader24 struct {
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
func (n nodeReader24) readLeft(nodeNumber uint) uint {
|
||||
return (uint(n.buffer[nodeNumber]) << 16) |
|
||||
(uint(n.buffer[nodeNumber+1]) << 8) |
|
||||
uint(n.buffer[nodeNumber+2])
|
||||
}
|
||||
|
||||
func (n nodeReader24) readRight(nodeNumber uint) uint {
|
||||
return (uint(n.buffer[nodeNumber+3]) << 16) |
|
||||
(uint(n.buffer[nodeNumber+4]) << 8) |
|
||||
uint(n.buffer[nodeNumber+5])
|
||||
}
|
||||
|
||||
type nodeReader28 struct {
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
func (n nodeReader28) readLeft(nodeNumber uint) uint {
|
||||
return ((uint(n.buffer[nodeNumber+3]) & 0xF0) << 20) |
|
||||
(uint(n.buffer[nodeNumber]) << 16) |
|
||||
(uint(n.buffer[nodeNumber+1]) << 8) |
|
||||
uint(n.buffer[nodeNumber+2])
|
||||
}
|
||||
|
||||
func (n nodeReader28) readRight(nodeNumber uint) uint {
|
||||
return ((uint(n.buffer[nodeNumber+3]) & 0x0F) << 24) |
|
||||
(uint(n.buffer[nodeNumber+4]) << 16) |
|
||||
(uint(n.buffer[nodeNumber+5]) << 8) |
|
||||
uint(n.buffer[nodeNumber+6])
|
||||
}
|
||||
|
||||
type nodeReader32 struct {
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
func (n nodeReader32) readLeft(nodeNumber uint) uint {
|
||||
return (uint(n.buffer[nodeNumber]) << 24) |
|
||||
(uint(n.buffer[nodeNumber+1]) << 16) |
|
||||
(uint(n.buffer[nodeNumber+2]) << 8) |
|
||||
uint(n.buffer[nodeNumber+3])
|
||||
}
|
||||
|
||||
func (n nodeReader32) readRight(nodeNumber uint) uint {
|
||||
return (uint(n.buffer[nodeNumber+4]) << 24) |
|
||||
(uint(n.buffer[nodeNumber+5]) << 16) |
|
||||
(uint(n.buffer[nodeNumber+6]) << 8) |
|
||||
uint(n.buffer[nodeNumber+7])
|
||||
}
|
||||
310
vendor/github.com/oschwald/maxminddb-golang/reader.go
generated
vendored
Normal file
310
vendor/github.com/oschwald/maxminddb-golang/reader.go
generated
vendored
Normal file
@@ -0,0 +1,310 @@
|
||||
// Package maxminddb provides a reader for the MaxMind DB file format.
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
// NotFound is returned by LookupOffset when a matched root record offset
|
||||
// cannot be found.
|
||||
NotFound = ^uintptr(0)
|
||||
|
||||
dataSectionSeparatorSize = 16
|
||||
)
|
||||
|
||||
var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
|
||||
|
||||
// Reader holds the data corresponding to the MaxMind DB file. Its only public
|
||||
// field is Metadata, which contains the metadata from the MaxMind DB file.
|
||||
//
|
||||
// All of the methods on Reader are thread-safe. The struct may be safely
|
||||
// shared across goroutines.
|
||||
type Reader struct {
|
||||
nodeReader nodeReader
|
||||
buffer []byte
|
||||
decoder decoder
|
||||
Metadata Metadata
|
||||
ipv4Start uint
|
||||
ipv4StartBitDepth int
|
||||
nodeOffsetMult uint
|
||||
hasMappedFile bool
|
||||
}
|
||||
|
||||
// Metadata holds the metadata decoded from the MaxMind DB file. In particular
|
||||
// it has the format version, the build time as Unix epoch time, the database
|
||||
// type and description, the IP version supported, and a slice of the natural
|
||||
// languages included.
|
||||
type Metadata struct {
|
||||
Description map[string]string `maxminddb:"description"`
|
||||
DatabaseType string `maxminddb:"database_type"`
|
||||
Languages []string `maxminddb:"languages"`
|
||||
BinaryFormatMajorVersion uint `maxminddb:"binary_format_major_version"`
|
||||
BinaryFormatMinorVersion uint `maxminddb:"binary_format_minor_version"`
|
||||
BuildEpoch uint `maxminddb:"build_epoch"`
|
||||
IPVersion uint `maxminddb:"ip_version"`
|
||||
NodeCount uint `maxminddb:"node_count"`
|
||||
RecordSize uint `maxminddb:"record_size"`
|
||||
}
|
||||
|
||||
// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
|
||||
// a Reader structure or an error.
|
||||
func FromBytes(buffer []byte) (*Reader, error) {
|
||||
metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
|
||||
|
||||
if metadataStart == -1 {
|
||||
return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
|
||||
}
|
||||
|
||||
metadataStart += len(metadataStartMarker)
|
||||
metadataDecoder := decoder{buffer[metadataStart:]}
|
||||
|
||||
var metadata Metadata
|
||||
|
||||
rvMetadata := reflect.ValueOf(&metadata)
|
||||
_, err := metadataDecoder.decode(0, rvMetadata, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
|
||||
dataSectionStart := searchTreeSize + dataSectionSeparatorSize
|
||||
dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
|
||||
if dataSectionStart > dataSectionEnd {
|
||||
return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
|
||||
}
|
||||
d := decoder{
|
||||
buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
|
||||
}
|
||||
|
||||
nodeBuffer := buffer[:searchTreeSize]
|
||||
var nodeReader nodeReader
|
||||
switch metadata.RecordSize {
|
||||
case 24:
|
||||
nodeReader = nodeReader24{buffer: nodeBuffer}
|
||||
case 28:
|
||||
nodeReader = nodeReader28{buffer: nodeBuffer}
|
||||
case 32:
|
||||
nodeReader = nodeReader32{buffer: nodeBuffer}
|
||||
default:
|
||||
return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize)
|
||||
}
|
||||
|
||||
reader := &Reader{
|
||||
buffer: buffer,
|
||||
nodeReader: nodeReader,
|
||||
decoder: d,
|
||||
Metadata: metadata,
|
||||
ipv4Start: 0,
|
||||
nodeOffsetMult: metadata.RecordSize / 4,
|
||||
}
|
||||
|
||||
reader.setIPv4Start()
|
||||
|
||||
return reader, err
|
||||
}
|
||||
|
||||
func (r *Reader) setIPv4Start() {
|
||||
if r.Metadata.IPVersion != 6 {
|
||||
return
|
||||
}
|
||||
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
|
||||
node := uint(0)
|
||||
i := 0
|
||||
for ; i < 96 && node < nodeCount; i++ {
|
||||
node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
|
||||
}
|
||||
r.ipv4Start = node
|
||||
r.ipv4StartBitDepth = i
|
||||
}
|
||||
|
||||
// Lookup retrieves the database record for ip and stores it in the value
|
||||
// pointed to by result. If result is nil or not a pointer, an error is
|
||||
// returned. If the data in the database record cannot be stored in result
|
||||
// because of type differences, an UnmarshalTypeError is returned. If the
|
||||
// database is invalid or otherwise cannot be read, an InvalidDatabaseError
|
||||
// is returned.
|
||||
func (r *Reader) Lookup(ip net.IP, result any) error {
|
||||
if r.buffer == nil {
|
||||
return errors.New("cannot call Lookup on a closed database")
|
||||
}
|
||||
pointer, _, _, err := r.lookupPointer(ip)
|
||||
if pointer == 0 || err != nil {
|
||||
return err
|
||||
}
|
||||
return r.retrieveData(pointer, result)
|
||||
}
|
||||
|
||||
// LookupNetwork retrieves the database record for ip and stores it in the
|
||||
// value pointed to by result. The network returned is the network associated
|
||||
// with the data record in the database. The ok return value indicates whether
|
||||
// the database contained a record for the ip.
|
||||
//
|
||||
// If result is nil or not a pointer, an error is returned. If the data in the
|
||||
// database record cannot be stored in result because of type differences, an
|
||||
// UnmarshalTypeError is returned. If the database is invalid or otherwise
|
||||
// cannot be read, an InvalidDatabaseError is returned.
|
||||
func (r *Reader) LookupNetwork(
|
||||
ip net.IP,
|
||||
result any,
|
||||
) (network *net.IPNet, ok bool, err error) {
|
||||
if r.buffer == nil {
|
||||
return nil, false, errors.New("cannot call Lookup on a closed database")
|
||||
}
|
||||
pointer, prefixLength, ip, err := r.lookupPointer(ip)
|
||||
|
||||
network = r.cidr(ip, prefixLength)
|
||||
if pointer == 0 || err != nil {
|
||||
return network, false, err
|
||||
}
|
||||
|
||||
return network, true, r.retrieveData(pointer, result)
|
||||
}
|
||||
|
||||
// LookupOffset maps an argument net.IP to a corresponding record offset in the
|
||||
// database. NotFound is returned if no such record is found, and a record may
|
||||
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
|
||||
// is an advanced API, which exists to provide clients with a means to cache
|
||||
// previously-decoded records.
|
||||
func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) {
|
||||
if r.buffer == nil {
|
||||
return 0, errors.New("cannot call LookupOffset on a closed database")
|
||||
}
|
||||
pointer, _, _, err := r.lookupPointer(ip)
|
||||
if pointer == 0 || err != nil {
|
||||
return NotFound, err
|
||||
}
|
||||
return r.resolveDataPointer(pointer)
|
||||
}
|
||||
|
||||
func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
|
||||
// This is necessary as the node that the IPv4 start is at may
|
||||
// be at a bit depth that is less that 96, i.e., ipv4Start points
|
||||
// to a leaf node. For instance, if a record was inserted at ::/8,
|
||||
// the ipv4Start would point directly at the leaf node for the
|
||||
// record and would have a bit depth of 8. This would not happen
|
||||
// with databases currently distributed by MaxMind as all of them
|
||||
// have an IPv4 subtree that is greater than a single node.
|
||||
if r.Metadata.IPVersion == 6 &&
|
||||
len(ip) == net.IPv4len &&
|
||||
r.ipv4StartBitDepth != 96 {
|
||||
return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
|
||||
}
|
||||
|
||||
mask := net.CIDRMask(prefixLength, len(ip)*8)
|
||||
return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
|
||||
}
|
||||
|
||||
// Decode the record at |offset| into |result|. The result value pointed to
|
||||
// must be a data value that corresponds to a record in the database. This may
|
||||
// include a struct representation of the data, a map capable of holding the
|
||||
// data or an empty any value.
|
||||
//
|
||||
// If result is a pointer to a struct, the struct need not include a field
|
||||
// for every value that may be in the database. If a field is not present in
|
||||
// the structure, the decoder will not decode that field, reducing the time
|
||||
// required to decode the record.
|
||||
//
|
||||
// As a special case, a struct field of type uintptr will be used to capture
|
||||
// the offset of the value. Decode may later be used to extract the stored
|
||||
// value from the offset. MaxMind DBs are highly normalized: for example in
|
||||
// the City database, all records of the same country will reference a
|
||||
// single representative record for that country. This uintptr behavior allows
|
||||
// clients to leverage this normalization in their own sub-record caching.
|
||||
func (r *Reader) Decode(offset uintptr, result any) error {
|
||||
if r.buffer == nil {
|
||||
return errors.New("cannot call Decode on a closed database")
|
||||
}
|
||||
return r.decode(offset, result)
|
||||
}
|
||||
|
||||
func (r *Reader) decode(offset uintptr, result any) error {
|
||||
rv := reflect.ValueOf(result)
|
||||
if rv.Kind() != reflect.Ptr || rv.IsNil() {
|
||||
return errors.New("result param must be a pointer")
|
||||
}
|
||||
|
||||
if dser, ok := result.(deserializer); ok {
|
||||
_, err := r.decoder.decodeToDeserializer(uint(offset), dser, 0, false)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err := r.decoder.decode(uint(offset), rv, 0)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
|
||||
if ip == nil {
|
||||
return 0, 0, nil, errors.New("IP passed to Lookup cannot be nil")
|
||||
}
|
||||
|
||||
ipV4Address := ip.To4()
|
||||
if ipV4Address != nil {
|
||||
ip = ipV4Address
|
||||
}
|
||||
if len(ip) == 16 && r.Metadata.IPVersion == 4 {
|
||||
return 0, 0, ip, fmt.Errorf(
|
||||
"error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database",
|
||||
ip.String(),
|
||||
)
|
||||
}
|
||||
|
||||
bitCount := uint(len(ip) * 8)
|
||||
|
||||
var node uint
|
||||
if bitCount == 32 {
|
||||
node = r.ipv4Start
|
||||
}
|
||||
node, prefixLength := r.traverseTree(ip, node, bitCount)
|
||||
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
if node == nodeCount {
|
||||
// Record is empty
|
||||
return 0, prefixLength, ip, nil
|
||||
} else if node > nodeCount {
|
||||
return node, prefixLength, ip, nil
|
||||
}
|
||||
|
||||
return 0, prefixLength, ip, newInvalidDatabaseError("invalid node in search tree")
|
||||
}
|
||||
|
||||
func (r *Reader) traverseTree(ip net.IP, node, bitCount uint) (uint, int) {
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
|
||||
i := uint(0)
|
||||
for ; i < bitCount && node < nodeCount; i++ {
|
||||
bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))
|
||||
|
||||
offset := node * r.nodeOffsetMult
|
||||
if bit == 0 {
|
||||
node = r.nodeReader.readLeft(offset)
|
||||
} else {
|
||||
node = r.nodeReader.readRight(offset)
|
||||
}
|
||||
}
|
||||
|
||||
return node, int(i)
|
||||
}
|
||||
|
||||
func (r *Reader) retrieveData(pointer uint, result any) error {
|
||||
offset, err := r.resolveDataPointer(pointer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return r.decode(offset, result)
|
||||
}
|
||||
|
||||
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
|
||||
resolved := uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)
|
||||
|
||||
if resolved >= uintptr(len(r.buffer)) {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
|
||||
}
|
||||
return resolved, nil
|
||||
}
|
||||
26
vendor/github.com/oschwald/maxminddb-golang/reader_memory.go
generated
vendored
Normal file
26
vendor/github.com/oschwald/maxminddb-golang/reader_memory.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
//go:build appengine || plan9 || js || wasip1 || wasi
|
||||
// +build appengine plan9 js wasip1 wasi
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "io/ioutil"
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map
|
||||
// on supported platforms. On platforms without memory map support, such
|
||||
// as WebAssembly or Google App Engine, the database is loaded into memory.
|
||||
// Use the Close method on the Reader object to return the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
bytes, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return FromBytes(bytes)
|
||||
}
|
||||
|
||||
// Close returns the resources used by the database to the system.
|
||||
func (r *Reader) Close() error {
|
||||
r.buffer = nil
|
||||
return nil
|
||||
}
|
||||
64
vendor/github.com/oschwald/maxminddb-golang/reader_mmap.go
generated
vendored
Normal file
64
vendor/github.com/oschwald/maxminddb-golang/reader_mmap.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
//go:build !appengine && !plan9 && !js && !wasip1 && !wasi
|
||||
// +build !appengine,!plan9,!js,!wasip1,!wasi
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map
|
||||
// on supported platforms. On platforms without memory map support, such
|
||||
// as WebAssembly or Google App Engine, the database is loaded into memory.
|
||||
// Use the Close method on the Reader object to return the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
mapFile, err := os.Open(file)
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats, err := mapFile.Stat()
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSize := int(stats.Size())
|
||||
mmap, err := mmap(int(mapFile.Fd()), fileSize)
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mapFile.Close(); err != nil {
|
||||
//nolint:errcheck // we prefer to return the original error
|
||||
munmap(mmap)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader, err := FromBytes(mmap)
|
||||
if err != nil {
|
||||
//nolint:errcheck // we prefer to return the original error
|
||||
munmap(mmap)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader.hasMappedFile = true
|
||||
runtime.SetFinalizer(reader, (*Reader).Close)
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
// Close returns the resources used by the database to the system.
|
||||
func (r *Reader) Close() error {
|
||||
var err error
|
||||
if r.hasMappedFile {
|
||||
runtime.SetFinalizer(r, nil)
|
||||
r.hasMappedFile = false
|
||||
err = munmap(r.buffer)
|
||||
}
|
||||
r.buffer = nil
|
||||
return err
|
||||
}
|
||||
10
vendor/github.com/oschwald/maxminddb-golang/set_zero_120.go
generated
vendored
Normal file
10
vendor/github.com/oschwald/maxminddb-golang/set_zero_120.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
//go:build go1.20
|
||||
// +build go1.20
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "reflect"
|
||||
|
||||
func reflectSetZero(v reflect.Value) {
|
||||
v.SetZero()
|
||||
}
|
||||
10
vendor/github.com/oschwald/maxminddb-golang/set_zero_pre120.go
generated
vendored
Normal file
10
vendor/github.com/oschwald/maxminddb-golang/set_zero_pre120.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
//go:build !go1.20
|
||||
// +build !go1.20
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "reflect"
|
||||
|
||||
func reflectSetZero(v reflect.Value) {
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
}
|
||||
211
vendor/github.com/oschwald/maxminddb-golang/traverse.go
generated
vendored
Normal file
211
vendor/github.com/oschwald/maxminddb-golang/traverse.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// Internal structure used to keep track of nodes we still need to visit.
|
||||
type netNode struct {
|
||||
ip net.IP
|
||||
bit uint
|
||||
pointer uint
|
||||
}
|
||||
|
||||
// Networks represents a set of subnets that we are iterating over.
|
||||
type Networks struct {
|
||||
err error
|
||||
reader *Reader
|
||||
nodes []netNode
|
||||
lastNode netNode
|
||||
skipAliasedNetworks bool
|
||||
}
|
||||
|
||||
var (
|
||||
allIPv4 = &net.IPNet{IP: make(net.IP, 4), Mask: net.CIDRMask(0, 32)}
|
||||
allIPv6 = &net.IPNet{IP: make(net.IP, 16), Mask: net.CIDRMask(0, 128)}
|
||||
)
|
||||
|
||||
// NetworksOption are options for Networks and NetworksWithin.
|
||||
type NetworksOption func(*Networks)
|
||||
|
||||
// SkipAliasedNetworks is an option for Networks and NetworksWithin that
|
||||
// makes them not iterate over aliases of the IPv4 subtree in an IPv6
|
||||
// database, e.g., ::ffff:0:0/96, 2001::/32, and 2002::/16.
|
||||
//
|
||||
// You most likely want to set this. The only reason it isn't the default
|
||||
// behavior is to provide backwards compatibility to existing users.
|
||||
func SkipAliasedNetworks(networks *Networks) {
|
||||
networks.skipAliasedNetworks = true
|
||||
}
|
||||
|
||||
// Networks returns an iterator that can be used to traverse all networks in
|
||||
// the database.
|
||||
//
|
||||
// Please note that a MaxMind DB may map IPv4 networks into several locations
|
||||
// in an IPv6 database. This iterator will iterate over all of these locations
|
||||
// separately. To only iterate over the IPv4 networks once, use the
|
||||
// SkipAliasedNetworks option.
|
||||
func (r *Reader) Networks(options ...NetworksOption) *Networks {
|
||||
var networks *Networks
|
||||
if r.Metadata.IPVersion == 6 {
|
||||
networks = r.NetworksWithin(allIPv6, options...)
|
||||
} else {
|
||||
networks = r.NetworksWithin(allIPv4, options...)
|
||||
}
|
||||
|
||||
return networks
|
||||
}
|
||||
|
||||
// NetworksWithin returns an iterator that can be used to traverse all networks
|
||||
// in the database which are contained in a given network.
|
||||
//
|
||||
// Please note that a MaxMind DB may map IPv4 networks into several locations
|
||||
// in an IPv6 database. This iterator will iterate over all of these locations
|
||||
// separately. To only iterate over the IPv4 networks once, use the
|
||||
// SkipAliasedNetworks option.
|
||||
//
|
||||
// If the provided network is contained within a network in the database, the
|
||||
// iterator will iterate over exactly one network, the containing network.
|
||||
func (r *Reader) NetworksWithin(network *net.IPNet, options ...NetworksOption) *Networks {
|
||||
if r.Metadata.IPVersion == 4 && network.IP.To4() == nil {
|
||||
return &Networks{
|
||||
err: fmt.Errorf(
|
||||
"error getting networks with '%s': you attempted to use an IPv6 network in an IPv4-only database",
|
||||
network.String(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
networks := &Networks{reader: r}
|
||||
for _, option := range options {
|
||||
option(networks)
|
||||
}
|
||||
|
||||
ip := network.IP
|
||||
prefixLength, _ := network.Mask.Size()
|
||||
|
||||
if r.Metadata.IPVersion == 6 && len(ip) == net.IPv4len {
|
||||
if networks.skipAliasedNetworks {
|
||||
ip = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ip[0], ip[1], ip[2], ip[3]}
|
||||
} else {
|
||||
ip = ip.To16()
|
||||
}
|
||||
prefixLength += 96
|
||||
}
|
||||
|
||||
pointer, bit := r.traverseTree(ip, 0, uint(prefixLength))
|
||||
|
||||
// We could skip this when bit >= prefixLength if we assume that the network
|
||||
// passed in is in canonical form. However, given that this may not be the
|
||||
// case, it is safest to always take the mask. If this is hot code at some
|
||||
// point, we could eliminate the allocation of the net.IPMask by zeroing
|
||||
// out the bits in ip directly.
|
||||
ip = ip.Mask(net.CIDRMask(bit, len(ip)*8))
|
||||
networks.nodes = []netNode{
|
||||
{
|
||||
ip: ip,
|
||||
bit: uint(bit),
|
||||
pointer: pointer,
|
||||
},
|
||||
}
|
||||
|
||||
return networks
|
||||
}
|
||||
|
||||
// Next prepares the next network for reading with the Network method. It
|
||||
// returns true if there is another network to be processed and false if there
|
||||
// are no more networks or if there is an error.
|
||||
func (n *Networks) Next() bool {
|
||||
if n.err != nil {
|
||||
return false
|
||||
}
|
||||
for len(n.nodes) > 0 {
|
||||
node := n.nodes[len(n.nodes)-1]
|
||||
n.nodes = n.nodes[:len(n.nodes)-1]
|
||||
|
||||
for node.pointer != n.reader.Metadata.NodeCount {
|
||||
// This skips IPv4 aliases without hardcoding the networks that the writer
|
||||
// currently aliases.
|
||||
if n.skipAliasedNetworks && n.reader.ipv4Start != 0 &&
|
||||
node.pointer == n.reader.ipv4Start && !isInIPv4Subtree(node.ip) {
|
||||
break
|
||||
}
|
||||
|
||||
if node.pointer > n.reader.Metadata.NodeCount {
|
||||
n.lastNode = node
|
||||
return true
|
||||
}
|
||||
ipRight := make(net.IP, len(node.ip))
|
||||
copy(ipRight, node.ip)
|
||||
if len(ipRight) <= int(node.bit>>3) {
|
||||
n.err = newInvalidDatabaseError(
|
||||
"invalid search tree at %v/%v", ipRight, node.bit)
|
||||
return false
|
||||
}
|
||||
ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))
|
||||
|
||||
offset := node.pointer * n.reader.nodeOffsetMult
|
||||
rightPointer := n.reader.nodeReader.readRight(offset)
|
||||
|
||||
node.bit++
|
||||
n.nodes = append(n.nodes, netNode{
|
||||
pointer: rightPointer,
|
||||
ip: ipRight,
|
||||
bit: node.bit,
|
||||
})
|
||||
|
||||
node.pointer = n.reader.nodeReader.readLeft(offset)
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Network returns the current network or an error if there is a problem
|
||||
// decoding the data for the network. It takes a pointer to a result value to
|
||||
// decode the network's data into.
|
||||
func (n *Networks) Network(result any) (*net.IPNet, error) {
|
||||
if n.err != nil {
|
||||
return nil, n.err
|
||||
}
|
||||
if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ip := n.lastNode.ip
|
||||
prefixLength := int(n.lastNode.bit)
|
||||
|
||||
// We do this because uses of SkipAliasedNetworks expect the IPv4 networks
|
||||
// to be returned as IPv4 networks. If we are not skipping aliased
|
||||
// networks, then the user will get IPv4 networks from the ::FFFF:0:0/96
|
||||
// network as Go automatically converts those.
|
||||
if n.skipAliasedNetworks && isInIPv4Subtree(ip) {
|
||||
ip = ip[12:]
|
||||
prefixLength -= 96
|
||||
}
|
||||
|
||||
return &net.IPNet{
|
||||
IP: ip,
|
||||
Mask: net.CIDRMask(prefixLength, len(ip)*8),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Err returns an error, if any, that was encountered during iteration.
|
||||
func (n *Networks) Err() error {
|
||||
return n.err
|
||||
}
|
||||
|
||||
// isInIPv4Subtree returns true if the IP is an IPv6 address in the database's
|
||||
// IPv4 subtree.
|
||||
func isInIPv4Subtree(ip net.IP) bool {
|
||||
if len(ip) != 16 {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < 12; i++ {
|
||||
if ip[i] != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
201
vendor/github.com/oschwald/maxminddb-golang/verifier.go
generated
vendored
Normal file
201
vendor/github.com/oschwald/maxminddb-golang/verifier.go
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type verifier struct {
|
||||
reader *Reader
|
||||
}
|
||||
|
||||
// Verify checks that the database is valid. It validates the search tree,
|
||||
// the data section, and the metadata section. This verifier is stricter than
|
||||
// the specification and may return errors on databases that are readable.
|
||||
func (r *Reader) Verify() error {
|
||||
v := verifier{r}
|
||||
if err := v.verifyMetadata(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := v.verifyDatabase()
|
||||
runtime.KeepAlive(v.reader)
|
||||
return err
|
||||
}
|
||||
|
||||
func (v *verifier) verifyMetadata() error {
|
||||
metadata := v.reader.Metadata
|
||||
|
||||
if metadata.BinaryFormatMajorVersion != 2 {
|
||||
return testError(
|
||||
"binary_format_major_version",
|
||||
2,
|
||||
metadata.BinaryFormatMajorVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.BinaryFormatMinorVersion != 0 {
|
||||
return testError(
|
||||
"binary_format_minor_version",
|
||||
0,
|
||||
metadata.BinaryFormatMinorVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.DatabaseType == "" {
|
||||
return testError(
|
||||
"database_type",
|
||||
"non-empty string",
|
||||
metadata.DatabaseType,
|
||||
)
|
||||
}
|
||||
|
||||
if len(metadata.Description) == 0 {
|
||||
return testError(
|
||||
"description",
|
||||
"non-empty slice",
|
||||
metadata.Description,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
|
||||
return testError(
|
||||
"ip_version",
|
||||
"4 or 6",
|
||||
metadata.IPVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.RecordSize != 24 &&
|
||||
metadata.RecordSize != 28 &&
|
||||
metadata.RecordSize != 32 {
|
||||
return testError(
|
||||
"record_size",
|
||||
"24, 28, or 32",
|
||||
metadata.RecordSize,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.NodeCount == 0 {
|
||||
return testError(
|
||||
"node_count",
|
||||
"positive integer",
|
||||
metadata.NodeCount,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDatabase() error {
|
||||
offsets, err := v.verifySearchTree()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := v.verifyDataSectionSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return v.verifyDataSection(offsets)
|
||||
}
|
||||
|
||||
func (v *verifier) verifySearchTree() (map[uint]bool, error) {
|
||||
offsets := make(map[uint]bool)
|
||||
|
||||
it := v.reader.Networks()
|
||||
for it.Next() {
|
||||
offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offsets[uint(offset)] = true
|
||||
}
|
||||
if err := it.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return offsets, nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDataSectionSeparator() error {
|
||||
separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
|
||||
|
||||
separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
|
||||
|
||||
for _, b := range separator {
|
||||
if b != 0 {
|
||||
return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
|
||||
pointerCount := len(offsets)
|
||||
|
||||
decoder := v.reader.decoder
|
||||
|
||||
var offset uint
|
||||
bufferLen := uint(len(decoder.buffer))
|
||||
for offset < bufferLen {
|
||||
var data any
|
||||
rv := reflect.ValueOf(&data)
|
||||
newOffset, err := decoder.decode(offset, rv, 0)
|
||||
if err != nil {
|
||||
return newInvalidDatabaseError(
|
||||
"received decoding error (%v) at offset of %v",
|
||||
err,
|
||||
offset,
|
||||
)
|
||||
}
|
||||
if newOffset <= offset {
|
||||
return newInvalidDatabaseError(
|
||||
"data section offset unexpectedly went from %v to %v",
|
||||
offset,
|
||||
newOffset,
|
||||
)
|
||||
}
|
||||
|
||||
pointer := offset
|
||||
|
||||
if _, ok := offsets[pointer]; !ok {
|
||||
return newInvalidDatabaseError(
|
||||
"found data (%v) at %v that the search tree does not point to",
|
||||
data,
|
||||
pointer,
|
||||
)
|
||||
}
|
||||
delete(offsets, pointer)
|
||||
|
||||
offset = newOffset
|
||||
}
|
||||
|
||||
if offset != bufferLen {
|
||||
return newInvalidDatabaseError(
|
||||
"unexpected data at the end of the data section (last offset: %v, end: %v)",
|
||||
offset,
|
||||
bufferLen,
|
||||
)
|
||||
}
|
||||
|
||||
if len(offsets) != 0 {
|
||||
return newInvalidDatabaseError(
|
||||
"found %v pointers (of %v) in the search tree that we did not see in the data section",
|
||||
len(offsets),
|
||||
pointerCount,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testError(
|
||||
field string,
|
||||
expected any,
|
||||
actual any,
|
||||
) error {
|
||||
return newInvalidDatabaseError(
|
||||
"%v - Expected: %v Actual: %v",
|
||||
field,
|
||||
expected,
|
||||
actual,
|
||||
)
|
||||
}
|
||||
2
vendor/github.com/pelletier/go-toml/v2/.dockerignore
generated
vendored
Normal file
2
vendor/github.com/pelletier/go-toml/v2/.dockerignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
cmd/tomll/tomll
|
||||
cmd/tomljson/tomljson
|
||||
4
vendor/github.com/pelletier/go-toml/v2/.gitattributes
generated
vendored
Normal file
4
vendor/github.com/pelletier/go-toml/v2/.gitattributes
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
* text=auto
|
||||
|
||||
benchmark/benchmark.toml text eol=lf
|
||||
testdata/** text eol=lf
|
||||
8
vendor/github.com/pelletier/go-toml/v2/.gitignore
generated
vendored
Normal file
8
vendor/github.com/pelletier/go-toml/v2/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
test_program/test_program_bin
|
||||
fuzz/
|
||||
cmd/tomll/tomll
|
||||
cmd/tomljson/tomljson
|
||||
cmd/tomltestgen/tomltestgen
|
||||
dist
|
||||
tests/
|
||||
test-results
|
||||
76
vendor/github.com/pelletier/go-toml/v2/.golangci.toml
generated
vendored
Normal file
76
vendor/github.com/pelletier/go-toml/v2/.golangci.toml
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
version = "2"
|
||||
|
||||
[linters]
|
||||
default = "none"
|
||||
enable = [
|
||||
"asciicheck",
|
||||
"bodyclose",
|
||||
"dogsled",
|
||||
"dupl",
|
||||
"durationcheck",
|
||||
"errcheck",
|
||||
"errorlint",
|
||||
"exhaustive",
|
||||
"forbidigo",
|
||||
"gochecknoinits",
|
||||
"goconst",
|
||||
"gocritic",
|
||||
"godoclint",
|
||||
"goheader",
|
||||
"gomodguard",
|
||||
"goprintffuncname",
|
||||
"gosec",
|
||||
"govet",
|
||||
"importas",
|
||||
"ineffassign",
|
||||
"lll",
|
||||
"makezero",
|
||||
"mirror",
|
||||
"misspell",
|
||||
"nakedret",
|
||||
"nilerr",
|
||||
"noctx",
|
||||
"nolintlint",
|
||||
"perfsprint",
|
||||
"prealloc",
|
||||
"predeclared",
|
||||
"revive",
|
||||
"rowserrcheck",
|
||||
"sqlclosecheck",
|
||||
"staticcheck",
|
||||
"thelper",
|
||||
"tparallel",
|
||||
"unconvert",
|
||||
"unparam",
|
||||
"unused",
|
||||
"usetesting",
|
||||
"wastedassign",
|
||||
"whitespace",
|
||||
]
|
||||
|
||||
[linters.settings.exhaustive]
|
||||
default-signifies-exhaustive = true
|
||||
|
||||
[linters.settings.lll]
|
||||
line-length = 150
|
||||
|
||||
[[linters.exclusions.rules]]
|
||||
path = ".test.go"
|
||||
linters = ["goconst", "gosec"]
|
||||
|
||||
[[linters.exclusions.rules]]
|
||||
path = "main.go"
|
||||
linters = ["forbidigo"]
|
||||
|
||||
[[linters.exclusions.rules]]
|
||||
path = "internal"
|
||||
linters = ["revive"]
|
||||
text = "(exported|indent-error-flow): "
|
||||
|
||||
[formatters]
|
||||
enable = [
|
||||
"gci",
|
||||
"gofmt",
|
||||
"gofumpt",
|
||||
"goimports",
|
||||
]
|
||||
124
vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
generated
vendored
Normal file
124
vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
version: 2
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- go fmt ./...
|
||||
- go test ./...
|
||||
builds:
|
||||
- id: tomll
|
||||
main: ./cmd/tomll
|
||||
binary: tomll
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_arm
|
||||
- linux_riscv64
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
- id: tomljson
|
||||
main: ./cmd/tomljson
|
||||
binary: tomljson
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_arm
|
||||
- linux_riscv64
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
- id: jsontoml
|
||||
main: ./cmd/jsontoml
|
||||
binary: jsontoml
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_riscv64
|
||||
- linux_arm
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
universal_binaries:
|
||||
- id: tomll
|
||||
replace: true
|
||||
name_template: tomll
|
||||
- id: tomljson
|
||||
replace: true
|
||||
name_template: tomljson
|
||||
- id: jsontoml
|
||||
replace: true
|
||||
name_template: jsontoml
|
||||
archives:
|
||||
- id: jsontoml
|
||||
format: tar.xz
|
||||
builds:
|
||||
- jsontoml
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
- id: tomljson
|
||||
format: tar.xz
|
||||
builds:
|
||||
- tomljson
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
- id: tomll
|
||||
format: tar.xz
|
||||
builds:
|
||||
- tomll
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
dockers:
|
||||
- id: tools
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
ids:
|
||||
- jsontoml
|
||||
- tomljson
|
||||
- tomll
|
||||
image_templates:
|
||||
- "ghcr.io/pelletier/go-toml:latest"
|
||||
- "ghcr.io/pelletier/go-toml:{{ .Tag }}"
|
||||
- "ghcr.io/pelletier/go-toml:v{{ .Major }}"
|
||||
skip_push: false
|
||||
checksum:
|
||||
name_template: 'sha256sums.txt'
|
||||
snapshot:
|
||||
version_template: "{{ incpatch .Version }}-next"
|
||||
release:
|
||||
github:
|
||||
owner: pelletier
|
||||
name: go-toml
|
||||
draft: true
|
||||
prerelease: auto
|
||||
mode: replace
|
||||
changelog:
|
||||
use: github-native
|
||||
announce:
|
||||
skip: true
|
||||
64
vendor/github.com/pelletier/go-toml/v2/AGENTS.md
generated
vendored
Normal file
64
vendor/github.com/pelletier/go-toml/v2/AGENTS.md
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
# Agent Guidelines for go-toml
|
||||
|
||||
This file provides guidelines for AI agents contributing to go-toml. All agents must follow these rules derived from [CONTRIBUTING.md](./CONTRIBUTING.md).
|
||||
|
||||
## Project Overview
|
||||
|
||||
go-toml is a TOML library for Go. The goal is to provide an easy-to-use and efficient TOML implementation that gets the job done without getting in the way.
|
||||
|
||||
## Code Change Rules
|
||||
|
||||
### Backward Compatibility
|
||||
|
||||
- **No backward-incompatible changes** unless explicitly discussed and approved
|
||||
- Avoid breaking people's programs unless absolutely necessary
|
||||
|
||||
### Testing Requirements
|
||||
|
||||
- **All bug fixes must include regression tests**
|
||||
- **All new code must be tested**
|
||||
- Run tests before submitting: `go test -race ./...`
|
||||
- Test coverage must not decrease. Check with:
|
||||
```bash
|
||||
go test -covermode=atomic -coverprofile=coverage.out
|
||||
go tool cover -func=coverage.out
|
||||
```
|
||||
- All lines of code touched by changes should be covered by tests
|
||||
|
||||
### Performance Requirements
|
||||
|
||||
- go-toml aims to stay efficient; avoid performance regressions
|
||||
- Run benchmarks to verify: `go test ./... -bench=. -count=10`
|
||||
- Compare results using [benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
|
||||
|
||||
### Documentation
|
||||
|
||||
- New features or feature extensions must include documentation
|
||||
- Documentation lives in [README.md](./README.md) and throughout source code
|
||||
|
||||
### Code Style
|
||||
|
||||
- Follow existing code format and structure
|
||||
- Code must pass `go fmt`
|
||||
- Code must pass linting with the same golangci-lint version as CI (see version in `.github/workflows/lint.yml`):
|
||||
```bash
|
||||
# Install specific version (check lint.yml for current version)
|
||||
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b $(go env GOPATH)/bin <version>
|
||||
# Run linter
|
||||
golangci-lint run ./...
|
||||
```
|
||||
|
||||
### Commit Messages
|
||||
|
||||
- Commit messages must explain **why** the change is needed
|
||||
- Keep messages clear and informative even if details are in the PR description
|
||||
|
||||
## Pull Request Checklist
|
||||
|
||||
Before submitting:
|
||||
|
||||
1. Tests pass (`go test -race ./...`)
|
||||
2. No backward-incompatible changes (unless discussed)
|
||||
3. Relevant documentation added/updated
|
||||
4. No performance regression (verify with benchmarks)
|
||||
5. Title is clear and understandable for changelog
|
||||
235
vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
generated
vendored
Normal file
235
vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,235 @@
|
||||
# Contributing
|
||||
|
||||
Thank you for your interest in go-toml! We appreciate you considering
|
||||
contributing to go-toml!
|
||||
|
||||
The main goal is the project is to provide an easy-to-use and efficient TOML
|
||||
implementation for Go that gets the job done and gets out of your way – dealing
|
||||
with TOML is probably not the central piece of your project.
|
||||
|
||||
As the single maintainer of go-toml, time is scarce. All help, big or small, is
|
||||
more than welcomed!
|
||||
|
||||
## Ask questions
|
||||
|
||||
Any question you may have, somebody else might have it too. Always feel free to
|
||||
ask them on the [discussion board][discussions]. We will try to answer them as
|
||||
clearly and quickly as possible, time permitting.
|
||||
|
||||
Asking questions also helps us identify areas where the documentation needs
|
||||
improvement, or new features that weren't envisioned before. Sometimes, a
|
||||
seemingly innocent question leads to the fix of a bug. Don't hesitate and ask
|
||||
away!
|
||||
|
||||
[discussions]: https://github.com/pelletier/go-toml/discussions
|
||||
|
||||
## Improve the documentation
|
||||
|
||||
The best way to share your knowledge and experience with go-toml is to improve
|
||||
the documentation. Fix a typo, clarify an interface, add an example, anything
|
||||
goes!
|
||||
|
||||
The documentation is present in the [README][readme] and thorough the source
|
||||
code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change
|
||||
to the documentation, create a pull request with your proposed changes. For
|
||||
simple changes like that, the easiest way to go is probably the "Fork this
|
||||
project and edit the file" button on GitHub, displayed at the top right of the
|
||||
file. Unless it's a trivial change (for example a typo), provide a little bit of
|
||||
context in your pull request description or commit message.
|
||||
|
||||
## Report a bug
|
||||
|
||||
Found a bug! Sorry to hear that :(. Help us and other track them down and fix by
|
||||
reporting it. [File a new bug report][bug-report] on the [issues
|
||||
tracker][issues-tracker]. The template should provide enough guidance on what to
|
||||
include. When in doubt: add more details! By reducing ambiguity and providing
|
||||
more information, it decreases back and forth and saves everyone time.
|
||||
|
||||
## Code changes
|
||||
|
||||
Want to contribute a patch? Very happy to hear that!
|
||||
|
||||
First, some high-level rules:
|
||||
|
||||
- A short proposal with some POC code is better than a lengthy piece of text
|
||||
with no code. Code speaks louder than words. That being said, bigger changes
|
||||
should probably start with a [discussion][discussions].
|
||||
- No backward-incompatible patch will be accepted unless discussed. Sometimes
|
||||
it's hard, but we try not to break people's programs unless we absolutely have
|
||||
to.
|
||||
- If you are writing a new feature or extending an existing one, make sure to
|
||||
write some documentation.
|
||||
- Bug fixes need to be accompanied with regression tests.
|
||||
- New code needs to be tested.
|
||||
- Your commit messages need to explain why the change is needed, even if already
|
||||
included in the PR description.
|
||||
|
||||
It does sound like a lot, but those best practices are here to save time overall
|
||||
and continuously improve the quality of the project, which is something everyone
|
||||
benefits from.
|
||||
|
||||
### Get started
|
||||
|
||||
The fairly standard code contribution process looks like that:
|
||||
|
||||
1. [Fork the project][fork].
|
||||
2. Make your changes, commit on any branch you like.
|
||||
3. [Open up a pull request][pull-request]
|
||||
4. Review, potential ask for changes.
|
||||
5. Merge.
|
||||
|
||||
Feel free to ask for help! You can create draft pull requests to gather
|
||||
some early feedback!
|
||||
|
||||
### Run the tests
|
||||
|
||||
You can run tests for go-toml using Go's test tool: `go test -race ./...`.
|
||||
|
||||
During the pull request process, all tests will be ran on Linux, Windows, and
|
||||
MacOS on the last two versions of Go.
|
||||
|
||||
However, given GitHub's new policy to _not_ run Actions on pull requests until a
|
||||
maintainer clicks on button, it is highly recommended that you run them locally
|
||||
as you make changes.
|
||||
|
||||
### Test across Go versions
|
||||
|
||||
The repository includes tooling to test go-toml across multiple Go versions
|
||||
(1.11 through 1.25) both locally and in GitHub Actions.
|
||||
|
||||
#### Local testing with Docker
|
||||
|
||||
Prerequisites: Docker installed and running, Bash shell, `rsync` command.
|
||||
|
||||
```bash
|
||||
# Test all Go versions in parallel (default)
|
||||
./test-go-versions.sh
|
||||
|
||||
# Test specific versions
|
||||
./test-go-versions.sh 1.21 1.22 1.23
|
||||
|
||||
# Test sequentially (slower but uses less resources)
|
||||
./test-go-versions.sh --sequential
|
||||
|
||||
# Verbose output with custom results directory
|
||||
./test-go-versions.sh --verbose --output ./my-results 1.24 1.25
|
||||
|
||||
# Show all options
|
||||
./test-go-versions.sh --help
|
||||
```
|
||||
|
||||
The script creates Docker containers for each Go version and runs the full test
|
||||
suite. Results are saved to a `test-results/` directory with individual logs and
|
||||
a comprehensive summary report.
|
||||
|
||||
The script only exits with a non-zero status code if either of the two most
|
||||
recent Go versions fail.
|
||||
|
||||
#### GitHub Actions testing (maintainers)
|
||||
|
||||
1. Go to the **Actions** tab in the GitHub repository
|
||||
2. Select **"Go Versions Compatibility Test"** from the workflow list
|
||||
3. Click **"Run workflow"**
|
||||
4. Optionally customize:
|
||||
- **Go versions**: Space-separated list (e.g., `1.21 1.22 1.23`)
|
||||
- **Execution mode**: Parallel (faster) or sequential (more stable)
|
||||
|
||||
### Check coverage
|
||||
|
||||
We use `go tool cover` to compute test coverage. Most code editors have a way to
|
||||
run and display code coverage, but at the end of the day, we do this:
|
||||
|
||||
```
|
||||
go test -covermode=atomic -coverprofile=coverage.out
|
||||
go tool cover -func=coverage.out
|
||||
```
|
||||
|
||||
and verify that the overall percentage of tested code does not go down. This is
|
||||
a requirement. As a rule of thumb, all lines of code touched by your changes
|
||||
should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your
|
||||
code lowers the coverage.
|
||||
|
||||
### Verify performance
|
||||
|
||||
Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's
|
||||
builtin benchmark systems. Because of their noisy nature, containers provided by
|
||||
GitHub Actions cannot be reliably used for benchmarking. As a result, you are
|
||||
responsible for checking that your changes do not incur a performance penalty.
|
||||
You can run their following to execute benchmarks:
|
||||
|
||||
```
|
||||
go test ./... -bench=. -count=10
|
||||
```
|
||||
|
||||
Benchmark results should be compared against each other with
|
||||
[benchstat][benchstat]. Typical flow looks like this:
|
||||
|
||||
1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to
|
||||
a file (for example `old.txt`).
|
||||
2. Make some code changes.
|
||||
3. Run `go test ....` again, and save the output to an other file (for example
|
||||
`new.txt`).
|
||||
4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any
|
||||
test.
|
||||
|
||||
On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts
|
||||
performance.
|
||||
|
||||
It is highly encouraged to add the benchstat results to your pull request
|
||||
description. Pull requests that lower performance will receive more scrutiny.
|
||||
|
||||
[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat
|
||||
|
||||
### Style
|
||||
|
||||
Try to look around and follow the same format and structure as the rest of the
|
||||
code. We enforce using `go fmt` on the whole code base.
|
||||
|
||||
---
|
||||
|
||||
## Maintainers-only
|
||||
|
||||
### Merge pull request
|
||||
|
||||
Checklist:
|
||||
|
||||
- Passing CI.
|
||||
- Does not introduce backward-incompatible changes (unless discussed).
|
||||
- Has relevant doc changes.
|
||||
- Benchstat does not show performance regression.
|
||||
- Pull request is [labeled appropriately][pr-labels].
|
||||
- Title will be understandable in the changelog.
|
||||
|
||||
1. Merge using "squash and merge".
|
||||
2. Make sure to edit the commit message to keep all the useful information
|
||||
nice and clean.
|
||||
3. Make sure the commit title is clear and contains the PR number (#123).
|
||||
|
||||
### New release
|
||||
|
||||
1. Decide on the next version number. Use semver. Review commits since last
|
||||
version to assess.
|
||||
2. Tag release. For example:
|
||||
```
|
||||
git checkout v2
|
||||
git pull
|
||||
git tag v2.2.0
|
||||
git push --tags
|
||||
```
|
||||
3. CI automatically builds a draft GitHub release. Review it and edit as
|
||||
necessary. Look for "Other changes". That would indicate a pull request not
|
||||
labeled properly. Tweak labels and pull request titles until changelog looks
|
||||
good for users.
|
||||
4. Check "create discussion" box, in the "Releases" category.
|
||||
5. If new version is an alpha or beta only, check pre-release box.
|
||||
|
||||
|
||||
[issues-tracker]: https://github.com/pelletier/go-toml/issues
|
||||
[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
|
||||
[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
|
||||
[readme]: ./README.md
|
||||
[fork]: https://help.github.com/articles/fork-a-repo
|
||||
[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
|
||||
[new-release]: https://github.com/pelletier/go-toml/releases/new
|
||||
[gh]: https://github.com/cli/cli
|
||||
[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml
|
||||
5
vendor/github.com/pelletier/go-toml/v2/Dockerfile
generated
vendored
Normal file
5
vendor/github.com/pelletier/go-toml/v2/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
FROM scratch
|
||||
ENV PATH "$PATH:/bin"
|
||||
COPY tomll /bin/tomll
|
||||
COPY tomljson /bin/tomljson
|
||||
COPY jsontoml /bin/jsontoml
|
||||
22
vendor/github.com/pelletier/go-toml/v2/LICENSE
generated
vendored
Normal file
22
vendor/github.com/pelletier/go-toml/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
go-toml v2
|
||||
Copyright (c) 2021 - 2023 Thomas Pelletier
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user