Add Nix flake for mcproxyctl
Vendor dependencies and expose mcproxyctl binary via nix build. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
4
vendor/github.com/oschwald/maxminddb-golang/.gitignore
generated
vendored
Normal file
4
vendor/github.com/oschwald/maxminddb-golang/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
.vscode
|
||||
*.out
|
||||
*.sw?
|
||||
*.test
|
||||
3
vendor/github.com/oschwald/maxminddb-golang/.gitmodules
generated
vendored
Normal file
3
vendor/github.com/oschwald/maxminddb-golang/.gitmodules
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
[submodule "test-data"]
|
||||
path = test-data
|
||||
url = https://github.com/maxmind/MaxMind-DB.git
|
||||
192
vendor/github.com/oschwald/maxminddb-golang/.golangci.toml
generated
vendored
Normal file
192
vendor/github.com/oschwald/maxminddb-golang/.golangci.toml
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
[run]
|
||||
# This is needed for precious, which may run multiple instances
|
||||
# in parallel
|
||||
allow-parallel-runners = true
|
||||
go = "1.21"
|
||||
tests = true
|
||||
timeout = "10m"
|
||||
|
||||
[linters]
|
||||
enable-all = true
|
||||
disable = [
|
||||
"cyclop",
|
||||
"depguard",
|
||||
"err113",
|
||||
"execinquery",
|
||||
"exhaustive",
|
||||
"exhaustruct",
|
||||
"forcetypeassert",
|
||||
"funlen",
|
||||
"gochecknoglobals",
|
||||
"godox",
|
||||
"gomnd",
|
||||
"inamedparam",
|
||||
"interfacebloat",
|
||||
"mnd",
|
||||
"nlreturn",
|
||||
"nonamedreturns",
|
||||
"paralleltest",
|
||||
"thelper",
|
||||
"testpackage",
|
||||
|
||||
"varnamelen",
|
||||
"wrapcheck",
|
||||
"wsl",
|
||||
|
||||
# Require Go 1.22
|
||||
"copyloopvar",
|
||||
"intrange",
|
||||
]
|
||||
|
||||
[linters-settings.errorlint]
|
||||
errorf = true
|
||||
asserts = true
|
||||
comparison = true
|
||||
|
||||
[linters-settings.exhaustive]
|
||||
default-signifies-exhaustive = true
|
||||
|
||||
[linters-settings.forbidigo]
|
||||
# Forbid the following identifiers
|
||||
forbid = [
|
||||
{ p = "Geoip", msg = "you should use `GeoIP`" },
|
||||
{ p = "geoIP", msg = "you should use `geoip`" },
|
||||
{ p = "Maxmind", msg = "you should use `MaxMind`" },
|
||||
{ p = "^maxMind", msg = "you should use `maxmind`" },
|
||||
{ p = "Minfraud", msg = "you should use `MinFraud`" },
|
||||
{ p = "^minFraud", msg = "you should use `minfraud`" },
|
||||
{ p = "^math.Max$", msg = "you should use the max built-in instead." },
|
||||
{ p = "^math.Min$", msg = "you should use the min built-in instead." },
|
||||
{ p = "^os.IsNotExist", msg = "As per their docs, new code should use errors.Is(err, fs.ErrNotExist)." },
|
||||
{ p = "^os.IsExist", msg = "As per their docs, new code should use errors.Is(err, fs.ErrExist)" },
|
||||
]
|
||||
|
||||
[linters-settings.gci]
|
||||
sections = ["standard", "default", "prefix(github.com/oschwald/maxminddb-golang)"]
|
||||
|
||||
[linters-settings.gofumpt]
|
||||
extra-rules = true
|
||||
|
||||
[linters-settings.govet]
|
||||
enable-all = true
|
||||
disable = "shadow"
|
||||
|
||||
[linters-settings.lll]
|
||||
line-length = 120
|
||||
tab-width = 4
|
||||
|
||||
[linters-settings.misspell]
|
||||
locale = "US"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "marshall"
|
||||
correction = "marshal"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "marshalling"
|
||||
correction = "marshaling"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "marshalls"
|
||||
correction = "marshals"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "unmarshall"
|
||||
correction = "unmarshal"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "unmarshalling"
|
||||
correction = "unmarshaling"
|
||||
|
||||
[[linters-settings.misspell.extra-words]]
|
||||
typo = "unmarshalls"
|
||||
correction = "unmarshals"
|
||||
|
||||
[linters-settings.nolintlint]
|
||||
allow-unused = false
|
||||
allow-no-explanation = ["lll", "misspell"]
|
||||
require-explanation = true
|
||||
require-specific = true
|
||||
|
||||
[linters-settings.revive]
|
||||
enable-all-rules = true
|
||||
ignore-generated-header = true
|
||||
severity = "warning"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "add-constant"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "cognitive-complexity"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "confusing-naming"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "confusing-results"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "cyclomatic"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "deep-exit"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "flag-parameter"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "function-length"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "function-result-limit"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "line-length-limit"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "max-public-structs"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "nested-structs"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unchecked-type-assertion"
|
||||
disabled = true
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unhandled-error"
|
||||
disabled = true
|
||||
|
||||
[linters-settings.tagliatelle.case.rules]
|
||||
avro = "snake"
|
||||
bson = "snake"
|
||||
env = "upperSnake"
|
||||
envconfig = "upperSnake"
|
||||
json = "snake"
|
||||
mapstructure = "snake"
|
||||
xml = "snake"
|
||||
yaml = "snake"
|
||||
|
||||
[linters-settings.unparam]
|
||||
check-exported = true
|
||||
|
||||
|
||||
[[issues.exclude-rules]]
|
||||
linters = [
|
||||
"govet",
|
||||
"revive",
|
||||
]
|
||||
path = "_test.go"
|
||||
text = "fieldalignment:"
|
||||
15
vendor/github.com/oschwald/maxminddb-golang/LICENSE
generated
vendored
Normal file
15
vendor/github.com/oschwald/maxminddb-golang/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
||||
36
vendor/github.com/oschwald/maxminddb-golang/README.md
generated
vendored
Normal file
36
vendor/github.com/oschwald/maxminddb-golang/README.md
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
# MaxMind DB Reader for Go #
|
||||
|
||||
[](https://godoc.org/github.com/oschwald/maxminddb-golang)
|
||||
|
||||
This is a Go reader for the MaxMind DB format. Although this can be used to
|
||||
read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
|
||||
[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
|
||||
[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
|
||||
API for doing so.
|
||||
|
||||
This is not an official MaxMind API.
|
||||
|
||||
## Installation ##
|
||||
|
||||
```
|
||||
go get github.com/oschwald/maxminddb-golang
|
||||
```
|
||||
|
||||
## Usage ##
|
||||
|
||||
[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
|
||||
documentation and examples.
|
||||
|
||||
## Examples ##
|
||||
|
||||
See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
|
||||
`example_test.go` for examples.
|
||||
|
||||
## Contributing ##
|
||||
|
||||
Contributions welcome! Please fork the repository and open a pull request
|
||||
with your changes.
|
||||
|
||||
## License ##
|
||||
|
||||
This is free software, licensed under the ISC License.
|
||||
900
vendor/github.com/oschwald/maxminddb-golang/decoder.go
generated
vendored
Normal file
900
vendor/github.com/oschwald/maxminddb-golang/decoder.go
generated
vendored
Normal file
@@ -0,0 +1,900 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
type dataType int
|
||||
|
||||
const (
|
||||
_Extended dataType = iota
|
||||
_Pointer
|
||||
_String
|
||||
_Float64
|
||||
_Bytes
|
||||
_Uint16
|
||||
_Uint32
|
||||
_Map
|
||||
_Int32
|
||||
_Uint64
|
||||
_Uint128
|
||||
_Slice
|
||||
// We don't use the next two. They are placeholders. See the spec
|
||||
// for more details.
|
||||
_Container //nolint: deadcode, varcheck // above
|
||||
_Marker //nolint: deadcode, varcheck // above
|
||||
_Bool
|
||||
_Float32
|
||||
)
|
||||
|
||||
const (
|
||||
// This is the value used in libmaxminddb.
|
||||
maximumDataStructureDepth = 512
|
||||
)
|
||||
|
||||
func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
|
||||
if depth > maximumDataStructureDepth {
|
||||
return 0, newInvalidDatabaseError(
|
||||
"exceeded maximum data structure depth; database is likely corrupt",
|
||||
)
|
||||
}
|
||||
typeNum, size, newOffset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
|
||||
result.Set(reflect.ValueOf(uintptr(offset)))
|
||||
return d.nextValueOffset(offset, 1)
|
||||
}
|
||||
return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
|
||||
}
|
||||
|
||||
func (d *decoder) decodeToDeserializer(
|
||||
offset uint,
|
||||
dser deserializer,
|
||||
depth int,
|
||||
getNext bool,
|
||||
) (uint, error) {
|
||||
if depth > maximumDataStructureDepth {
|
||||
return 0, newInvalidDatabaseError(
|
||||
"exceeded maximum data structure depth; database is likely corrupt",
|
||||
)
|
||||
}
|
||||
skip, err := dser.ShouldSkip(uintptr(offset))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if skip {
|
||||
if getNext {
|
||||
return d.nextValueOffset(offset, 1)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
typeNum, size, newOffset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return d.decodeFromTypeToDeserializer(typeNum, size, newOffset, dser, depth+1)
|
||||
}
|
||||
|
||||
func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
|
||||
newOffset := offset + 1
|
||||
if offset >= uint(len(d.buffer)) {
|
||||
return 0, 0, 0, newOffsetError()
|
||||
}
|
||||
ctrlByte := d.buffer[offset]
|
||||
|
||||
typeNum := dataType(ctrlByte >> 5)
|
||||
if typeNum == _Extended {
|
||||
if newOffset >= uint(len(d.buffer)) {
|
||||
return 0, 0, 0, newOffsetError()
|
||||
}
|
||||
typeNum = dataType(d.buffer[newOffset] + 7)
|
||||
newOffset++
|
||||
}
|
||||
|
||||
var size uint
|
||||
size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
|
||||
return typeNum, size, newOffset, err
|
||||
}
|
||||
|
||||
func (d *decoder) sizeFromCtrlByte(
|
||||
ctrlByte byte,
|
||||
offset uint,
|
||||
typeNum dataType,
|
||||
) (uint, uint, error) {
|
||||
size := uint(ctrlByte & 0x1f)
|
||||
if typeNum == _Extended {
|
||||
return size, offset, nil
|
||||
}
|
||||
|
||||
var bytesToRead uint
|
||||
if size < 29 {
|
||||
return size, offset, nil
|
||||
}
|
||||
|
||||
bytesToRead = size - 28
|
||||
newOffset := offset + bytesToRead
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return 0, 0, newOffsetError()
|
||||
}
|
||||
if size == 29 {
|
||||
return 29 + uint(d.buffer[offset]), offset + 1, nil
|
||||
}
|
||||
|
||||
sizeBytes := d.buffer[offset:newOffset]
|
||||
|
||||
switch {
|
||||
case size == 30:
|
||||
size = 285 + uintFromBytes(0, sizeBytes)
|
||||
case size > 30:
|
||||
size = uintFromBytes(0, sizeBytes) + 65821
|
||||
}
|
||||
return size, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFromType(
|
||||
dtype dataType,
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result = indirect(result)
|
||||
|
||||
// For these types, size has a special meaning
|
||||
switch dtype {
|
||||
case _Bool:
|
||||
return unmarshalBool(size, offset, result)
|
||||
case _Map:
|
||||
return d.unmarshalMap(size, offset, result, depth)
|
||||
case _Pointer:
|
||||
return d.unmarshalPointer(size, offset, result, depth)
|
||||
case _Slice:
|
||||
return d.unmarshalSlice(size, offset, result, depth)
|
||||
}
|
||||
|
||||
// For the remaining types, size is the byte size
|
||||
if offset+size > uint(len(d.buffer)) {
|
||||
return 0, newOffsetError()
|
||||
}
|
||||
switch dtype {
|
||||
case _Bytes:
|
||||
return d.unmarshalBytes(size, offset, result)
|
||||
case _Float32:
|
||||
return d.unmarshalFloat32(size, offset, result)
|
||||
case _Float64:
|
||||
return d.unmarshalFloat64(size, offset, result)
|
||||
case _Int32:
|
||||
return d.unmarshalInt32(size, offset, result)
|
||||
case _String:
|
||||
return d.unmarshalString(size, offset, result)
|
||||
case _Uint16:
|
||||
return d.unmarshalUint(size, offset, result, 16)
|
||||
case _Uint32:
|
||||
return d.unmarshalUint(size, offset, result, 32)
|
||||
case _Uint64:
|
||||
return d.unmarshalUint(size, offset, result, 64)
|
||||
case _Uint128:
|
||||
return d.unmarshalUint128(size, offset, result)
|
||||
default:
|
||||
return 0, newInvalidDatabaseError("unknown type: %d", dtype)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFromTypeToDeserializer(
|
||||
dtype dataType,
|
||||
size uint,
|
||||
offset uint,
|
||||
dser deserializer,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
// For these types, size has a special meaning
|
||||
switch dtype {
|
||||
case _Bool:
|
||||
v, offset := decodeBool(size, offset)
|
||||
return offset, dser.Bool(v)
|
||||
case _Map:
|
||||
return d.decodeMapToDeserializer(size, offset, dser, depth)
|
||||
case _Pointer:
|
||||
pointer, newOffset, err := d.decodePointer(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, err = d.decodeToDeserializer(pointer, dser, depth, false)
|
||||
return newOffset, err
|
||||
case _Slice:
|
||||
return d.decodeSliceToDeserializer(size, offset, dser, depth)
|
||||
}
|
||||
|
||||
// For the remaining types, size is the byte size
|
||||
if offset+size > uint(len(d.buffer)) {
|
||||
return 0, newOffsetError()
|
||||
}
|
||||
switch dtype {
|
||||
case _Bytes:
|
||||
v, offset := d.decodeBytes(size, offset)
|
||||
return offset, dser.Bytes(v)
|
||||
case _Float32:
|
||||
v, offset := d.decodeFloat32(size, offset)
|
||||
return offset, dser.Float32(v)
|
||||
case _Float64:
|
||||
v, offset := d.decodeFloat64(size, offset)
|
||||
return offset, dser.Float64(v)
|
||||
case _Int32:
|
||||
v, offset := d.decodeInt(size, offset)
|
||||
return offset, dser.Int32(int32(v))
|
||||
case _String:
|
||||
v, offset := d.decodeString(size, offset)
|
||||
return offset, dser.String(v)
|
||||
case _Uint16:
|
||||
v, offset := d.decodeUint(size, offset)
|
||||
return offset, dser.Uint16(uint16(v))
|
||||
case _Uint32:
|
||||
v, offset := d.decodeUint(size, offset)
|
||||
return offset, dser.Uint32(uint32(v))
|
||||
case _Uint64:
|
||||
v, offset := d.decodeUint(size, offset)
|
||||
return offset, dser.Uint64(v)
|
||||
case _Uint128:
|
||||
v, offset := d.decodeUint128(size, offset)
|
||||
return offset, dser.Uint128(v)
|
||||
default:
|
||||
return 0, newInvalidDatabaseError("unknown type: %d", dtype)
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalBool(size, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 1 {
|
||||
return 0, newInvalidDatabaseError(
|
||||
"the MaxMind DB file's data section contains bad data (bool size of %v)",
|
||||
size,
|
||||
)
|
||||
}
|
||||
value, newOffset := decodeBool(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Bool:
|
||||
result.SetBool(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
// indirect follows pointers and create values as necessary. This is
|
||||
// heavily based on encoding/json as my original version had a subtle
|
||||
// bug. This method should be considered to be licensed under
|
||||
// https://golang.org/LICENSE
|
||||
func indirect(result reflect.Value) reflect.Value {
|
||||
for {
|
||||
// Load value from interface, but only if the result will be
|
||||
// usefully addressable.
|
||||
if result.Kind() == reflect.Interface && !result.IsNil() {
|
||||
e := result.Elem()
|
||||
if e.Kind() == reflect.Ptr && !e.IsNil() {
|
||||
result = e
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if result.Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
|
||||
if result.IsNil() {
|
||||
result.Set(reflect.New(result.Type().Elem()))
|
||||
}
|
||||
|
||||
result = result.Elem()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
var sliceType = reflect.TypeOf([]byte{})
|
||||
|
||||
func (d *decoder) unmarshalBytes(size, offset uint, result reflect.Value) (uint, error) {
|
||||
value, newOffset := d.decodeBytes(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Slice:
|
||||
if result.Type() == sliceType {
|
||||
result.SetBytes(value)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalFloat32(size, offset uint, result reflect.Value) (uint, error) {
|
||||
if size != 4 {
|
||||
return 0, newInvalidDatabaseError(
|
||||
"the MaxMind DB file's data section contains bad data (float32 size of %v)",
|
||||
size,
|
||||
)
|
||||
}
|
||||
value, newOffset := d.decodeFloat32(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
result.SetFloat(float64(value))
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalFloat64(size, offset uint, result reflect.Value) (uint, error) {
|
||||
if size != 8 {
|
||||
return 0, newInvalidDatabaseError(
|
||||
"the MaxMind DB file's data section contains bad data (float 64 size of %v)",
|
||||
size,
|
||||
)
|
||||
}
|
||||
value, newOffset := d.decodeFloat64(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if result.OverflowFloat(value) {
|
||||
return 0, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
result.SetFloat(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalInt32(size, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 4 {
|
||||
return 0, newInvalidDatabaseError(
|
||||
"the MaxMind DB file's data section contains bad data (int32 size of %v)",
|
||||
size,
|
||||
)
|
||||
}
|
||||
value, newOffset := d.decodeInt(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
n := int64(value)
|
||||
if !result.OverflowInt(n) {
|
||||
result.SetInt(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Uint,
|
||||
reflect.Uint8,
|
||||
reflect.Uint16,
|
||||
reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Uintptr:
|
||||
n := uint64(value)
|
||||
if !result.OverflowUint(n) {
|
||||
result.SetUint(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalMap(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result = indirect(result)
|
||||
switch result.Kind() {
|
||||
default:
|
||||
return 0, newUnmarshalTypeStrError("map", result.Type())
|
||||
case reflect.Struct:
|
||||
return d.decodeStruct(size, offset, result, depth)
|
||||
case reflect.Map:
|
||||
return d.decodeMap(size, offset, result, depth)
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
rv := reflect.ValueOf(make(map[string]any, size))
|
||||
newOffset, err := d.decodeMap(size, offset, rv, depth)
|
||||
result.Set(rv)
|
||||
return newOffset, err
|
||||
}
|
||||
return 0, newUnmarshalTypeStrError("map", result.Type())
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalPointer(
|
||||
size, offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
pointer, newOffset, err := d.decodePointer(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, err = d.decode(pointer, result, depth)
|
||||
return newOffset, err
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalSlice(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
switch result.Kind() {
|
||||
case reflect.Slice:
|
||||
return d.decodeSlice(size, offset, result, depth)
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
a := []any{}
|
||||
rv := reflect.ValueOf(&a).Elem()
|
||||
newOffset, err := d.decodeSlice(size, offset, rv, depth)
|
||||
result.Set(rv)
|
||||
return newOffset, err
|
||||
}
|
||||
}
|
||||
return 0, newUnmarshalTypeStrError("array", result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalString(size, offset uint, result reflect.Value) (uint, error) {
|
||||
value, newOffset := d.decodeString(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.String:
|
||||
result.SetString(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalUint(
|
||||
size, offset uint,
|
||||
result reflect.Value,
|
||||
uintType uint,
|
||||
) (uint, error) {
|
||||
if size > uintType/8 {
|
||||
return 0, newInvalidDatabaseError(
|
||||
"the MaxMind DB file's data section contains bad data (uint%v size of %v)",
|
||||
uintType,
|
||||
size,
|
||||
)
|
||||
}
|
||||
|
||||
value, newOffset := d.decodeUint(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
n := int64(value)
|
||||
if !result.OverflowInt(n) {
|
||||
result.SetInt(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Uint,
|
||||
reflect.Uint8,
|
||||
reflect.Uint16,
|
||||
reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Uintptr:
|
||||
if !result.OverflowUint(value) {
|
||||
result.SetUint(value)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
var bigIntType = reflect.TypeOf(big.Int{})
|
||||
|
||||
func (d *decoder) unmarshalUint128(size, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 16 {
|
||||
return 0, newInvalidDatabaseError(
|
||||
"the MaxMind DB file's data section contains bad data (uint128 size of %v)",
|
||||
size,
|
||||
)
|
||||
}
|
||||
value, newOffset := d.decodeUint128(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Struct:
|
||||
if result.Type() == bigIntType {
|
||||
result.Set(reflect.ValueOf(*value))
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func decodeBool(size, offset uint) (bool, uint) {
|
||||
return size != 0, offset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBytes(size, offset uint) ([]byte, uint) {
|
||||
newOffset := offset + size
|
||||
bytes := make([]byte, size)
|
||||
copy(bytes, d.buffer[offset:newOffset])
|
||||
return bytes, newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat64(size, offset uint) (float64, uint) {
|
||||
newOffset := offset + size
|
||||
bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
|
||||
return math.Float64frombits(bits), newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat32(size, offset uint) (float32, uint) {
|
||||
newOffset := offset + size
|
||||
bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
|
||||
return math.Float32frombits(bits), newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt(size, offset uint) (int, uint) {
|
||||
newOffset := offset + size
|
||||
var val int32
|
||||
for _, b := range d.buffer[offset:newOffset] {
|
||||
val = (val << 8) | int32(b)
|
||||
}
|
||||
return int(val), newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeMap(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
if result.IsNil() {
|
||||
result.Set(reflect.MakeMapWithSize(result.Type(), int(size)))
|
||||
}
|
||||
|
||||
mapType := result.Type()
|
||||
keyValue := reflect.New(mapType.Key()).Elem()
|
||||
elemType := mapType.Elem()
|
||||
var elemValue reflect.Value
|
||||
for i := uint(0); i < size; i++ {
|
||||
var key []byte
|
||||
var err error
|
||||
key, offset, err = d.decodeKey(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if elemValue.IsValid() {
|
||||
// After 1.20 is the minimum supported version, this can just be
|
||||
// elemValue.SetZero()
|
||||
reflectSetZero(elemValue)
|
||||
} else {
|
||||
elemValue = reflect.New(elemType).Elem()
|
||||
}
|
||||
|
||||
offset, err = d.decode(offset, elemValue, depth)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("decoding value for %s: %w", key, err)
|
||||
}
|
||||
|
||||
keyValue.SetString(string(key))
|
||||
result.SetMapIndex(keyValue, elemValue)
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeMapToDeserializer(
|
||||
size uint,
|
||||
offset uint,
|
||||
dser deserializer,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
err := dser.StartMap(size)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for i := uint(0); i < size; i++ {
|
||||
// TODO - implement key/value skipping?
|
||||
offset, err = d.decodeToDeserializer(offset, dser, depth, true)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
offset, err = d.decodeToDeserializer(offset, dser, depth, true)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
err = dser.End()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodePointer(
|
||||
size uint,
|
||||
offset uint,
|
||||
) (uint, uint, error) {
|
||||
pointerSize := ((size >> 3) & 0x3) + 1
|
||||
newOffset := offset + pointerSize
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return 0, 0, newOffsetError()
|
||||
}
|
||||
pointerBytes := d.buffer[offset:newOffset]
|
||||
var prefix uint
|
||||
if pointerSize == 4 {
|
||||
prefix = 0
|
||||
} else {
|
||||
prefix = size & 0x7
|
||||
}
|
||||
unpacked := uintFromBytes(prefix, pointerBytes)
|
||||
|
||||
var pointerValueOffset uint
|
||||
switch pointerSize {
|
||||
case 1:
|
||||
pointerValueOffset = 0
|
||||
case 2:
|
||||
pointerValueOffset = 2048
|
||||
case 3:
|
||||
pointerValueOffset = 526336
|
||||
case 4:
|
||||
pointerValueOffset = 0
|
||||
}
|
||||
|
||||
pointer := unpacked + pointerValueOffset
|
||||
|
||||
return pointer, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeSlice(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
|
||||
for i := 0; i < int(size); i++ {
|
||||
var err error
|
||||
offset, err = d.decode(offset, result.Index(i), depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeSliceToDeserializer(
|
||||
size uint,
|
||||
offset uint,
|
||||
dser deserializer,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
err := dser.StartSlice(size)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for i := uint(0); i < size; i++ {
|
||||
offset, err = d.decodeToDeserializer(offset, dser, depth, true)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
err = dser.End()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeString(size, offset uint) (string, uint) {
|
||||
newOffset := offset + size
|
||||
return string(d.buffer[offset:newOffset]), newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeStruct(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
fields := cachedFields(result)
|
||||
|
||||
// This fills in embedded structs
|
||||
for _, i := range fields.anonymousFields {
|
||||
_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// This handles named fields
|
||||
for i := uint(0); i < size; i++ {
|
||||
var (
|
||||
err error
|
||||
key []byte
|
||||
)
|
||||
key, offset, err = d.decodeKey(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// The string() does not create a copy due to this compiler
|
||||
// optimization: https://github.com/golang/go/issues/3512
|
||||
j, ok := fields.namedFields[string(key)]
|
||||
if !ok {
|
||||
offset, err = d.nextValueOffset(offset, 1)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
offset, err = d.decode(offset, result.Field(j), depth)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("decoding value for %s: %w", key, err)
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
type fieldsType struct {
|
||||
namedFields map[string]int
|
||||
anonymousFields []int
|
||||
}
|
||||
|
||||
var fieldsMap sync.Map
|
||||
|
||||
func cachedFields(result reflect.Value) *fieldsType {
|
||||
resultType := result.Type()
|
||||
|
||||
if fields, ok := fieldsMap.Load(resultType); ok {
|
||||
return fields.(*fieldsType)
|
||||
}
|
||||
numFields := resultType.NumField()
|
||||
namedFields := make(map[string]int, numFields)
|
||||
var anonymous []int
|
||||
for i := 0; i < numFields; i++ {
|
||||
field := resultType.Field(i)
|
||||
|
||||
fieldName := field.Name
|
||||
if tag := field.Tag.Get("maxminddb"); tag != "" {
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
fieldName = tag
|
||||
}
|
||||
if field.Anonymous {
|
||||
anonymous = append(anonymous, i)
|
||||
continue
|
||||
}
|
||||
namedFields[fieldName] = i
|
||||
}
|
||||
fields := &fieldsType{namedFields, anonymous}
|
||||
fieldsMap.Store(resultType, fields)
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
func (d *decoder) decodeUint(size, offset uint) (uint64, uint) {
|
||||
newOffset := offset + size
|
||||
bytes := d.buffer[offset:newOffset]
|
||||
|
||||
var val uint64
|
||||
for _, b := range bytes {
|
||||
val = (val << 8) | uint64(b)
|
||||
}
|
||||
return val, newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeUint128(size, offset uint) (*big.Int, uint) {
|
||||
newOffset := offset + size
|
||||
val := new(big.Int)
|
||||
val.SetBytes(d.buffer[offset:newOffset])
|
||||
|
||||
return val, newOffset
|
||||
}
|
||||
|
||||
// uintFromBytes folds the big-endian bytes in uintBytes into prefix,
// shifting the accumulator left 8 bits per byte and OR-ing the byte in.
func uintFromBytes(prefix uint, uintBytes []byte) uint {
	result := prefix
	for _, b := range uintBytes {
		result = result<<8 | uint(b)
	}
	return result
}
|
||||
|
||||
// decodeKey decodes a map key into []byte slice. We use a []byte so that we
|
||||
// can take advantage of https://github.com/golang/go/issues/3512 to avoid
|
||||
// copying the bytes when decoding a struct. Previously, we achieved this by
|
||||
// using unsafe.
|
||||
func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
|
||||
typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if typeNum == _Pointer {
|
||||
pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
key, _, err := d.decodeKey(pointer)
|
||||
return key, ptrOffset, err
|
||||
}
|
||||
if typeNum != _String {
|
||||
return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
|
||||
}
|
||||
newOffset := dataOffset + size
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return nil, 0, newOffsetError()
|
||||
}
|
||||
return d.buffer[dataOffset:newOffset], newOffset, nil
|
||||
}
|
||||
|
||||
// This function is used to skip ahead to the next value without decoding
|
||||
// the one at the offset passed in. The size bits have different meanings for
|
||||
// different data types.
|
||||
func (d *decoder) nextValueOffset(offset, numberToSkip uint) (uint, error) {
|
||||
if numberToSkip == 0 {
|
||||
return offset, nil
|
||||
}
|
||||
typeNum, size, offset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch typeNum {
|
||||
case _Pointer:
|
||||
_, offset, err = d.decodePointer(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
case _Map:
|
||||
numberToSkip += 2 * size
|
||||
case _Slice:
|
||||
numberToSkip += size
|
||||
case _Bool:
|
||||
default:
|
||||
offset += size
|
||||
}
|
||||
return d.nextValueOffset(offset, numberToSkip-1)
|
||||
}
|
||||
31
vendor/github.com/oschwald/maxminddb-golang/deserializer.go
generated
vendored
Normal file
31
vendor/github.com/oschwald/maxminddb-golang/deserializer.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
package maxminddb
|
||||
|
||||
import "math/big"
|
||||
|
||||
// deserializer is an interface for a type that deserializes a MaxMind DB
// data record to some other type. This exists as an alternative to the
// standard reflection API.
//
// This is fundamentally different than the Unmarshaler interface that
// several packages provide. A Deserializer will generally create the
// final struct or value rather than unmarshaling to itself.
//
// This interface and the associated unmarshaling code is EXPERIMENTAL!
// It is not currently covered by any Semantic Versioning guarantees.
// Use at your own risk.
type deserializer interface {
	// ShouldSkip reports whether the record at offset can be skipped.
	ShouldSkip(offset uintptr) (bool, error)
	// StartSlice and StartMap begin a container of the given size;
	// End closes the most recently started container.
	StartSlice(size uint) error
	StartMap(size uint) error
	End() error
	// Each remaining method receives one decoded scalar value.
	String(string) error
	Float64(float64) error
	Bytes([]byte) error
	Uint16(uint16) error
	Uint32(uint32) error
	Int32(int32) error
	Uint64(uint64) error
	Uint128(*big.Int) error
	Bool(bool) error
	Float32(float32) error
}
|
||||
46
vendor/github.com/oschwald/maxminddb-golang/errors.go
generated
vendored
Normal file
46
vendor/github.com/oschwald/maxminddb-golang/errors.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// InvalidDatabaseError is returned when the database contains invalid data
// and cannot be parsed.
type InvalidDatabaseError struct {
	message string
}

// newOffsetError reports a read past the end of the database buffer.
func newOffsetError() InvalidDatabaseError {
	return InvalidDatabaseError{message: "unexpected end of database"}
}

// newInvalidDatabaseError builds an InvalidDatabaseError from a
// printf-style format string and arguments.
func newInvalidDatabaseError(format string, args ...any) InvalidDatabaseError {
	return InvalidDatabaseError{message: fmt.Sprintf(format, args...)}
}

// Error implements the error interface.
func (e InvalidDatabaseError) Error() string {
	return e.message
}
|
||||
|
||||
// UnmarshalTypeError is returned when the value in the database cannot be
|
||||
// assigned to the specified data type.
|
||||
type UnmarshalTypeError struct {
|
||||
Type reflect.Type
|
||||
Value string
|
||||
}
|
||||
|
||||
func newUnmarshalTypeStrError(value string, rType reflect.Type) UnmarshalTypeError {
|
||||
return UnmarshalTypeError{
|
||||
Type: rType,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
|
||||
func newUnmarshalTypeError(value any, rType reflect.Type) UnmarshalTypeError {
|
||||
return newUnmarshalTypeStrError(fmt.Sprintf("%v (%T)", value, value), rType)
|
||||
}
|
||||
|
||||
func (e UnmarshalTypeError) Error() string {
|
||||
return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type)
|
||||
}
|
||||
16
vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
generated
vendored
Normal file
16
vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build !windows && !appengine && !plan9 && !js && !wasip1 && !wasi
|
||||
// +build !windows,!appengine,!plan9,!js,!wasip1,!wasi
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func mmap(fd, length int) (data []byte, err error) {
|
||||
return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
|
||||
}
|
||||
|
||||
func munmap(b []byte) (err error) {
|
||||
return unix.Munmap(b)
|
||||
}
|
||||
86
vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
generated
vendored
Normal file
86
vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
//go:build windows && !appengine
|
||||
// +build windows,!appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
// Windows support largely borrowed from mmap-go.
|
||||
//
|
||||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
type memoryMap []byte
|
||||
|
||||
// Windows
|
||||
var handleLock sync.Mutex
|
||||
var handleMap = map[uintptr]windows.Handle{}
|
||||
|
||||
func mmap(fd int, length int) (data []byte, err error) {
|
||||
h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
|
||||
uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
|
||||
if h == 0 {
|
||||
return nil, os.NewSyscallError("CreateFileMapping", errno)
|
||||
}
|
||||
|
||||
addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
|
||||
0, uintptr(length))
|
||||
if addr == 0 {
|
||||
return nil, os.NewSyscallError("MapViewOfFile", errno)
|
||||
}
|
||||
handleLock.Lock()
|
||||
handleMap[addr] = h
|
||||
handleLock.Unlock()
|
||||
|
||||
m := memoryMap{}
|
||||
dh := m.header()
|
||||
dh.Data = addr
|
||||
dh.Len = length
|
||||
dh.Cap = dh.Len
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *memoryMap) header() *reflect.SliceHeader {
|
||||
return (*reflect.SliceHeader)(unsafe.Pointer(m))
|
||||
}
|
||||
|
||||
func flush(addr, len uintptr) error {
|
||||
errno := windows.FlushViewOfFile(addr, len)
|
||||
return os.NewSyscallError("FlushViewOfFile", errno)
|
||||
}
|
||||
|
||||
func munmap(b []byte) (err error) {
|
||||
m := memoryMap(b)
|
||||
dh := m.header()
|
||||
|
||||
addr := dh.Data
|
||||
length := uintptr(dh.Len)
|
||||
|
||||
flush(addr, length)
|
||||
err = windows.UnmapViewOfFile(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handleLock.Lock()
|
||||
defer handleLock.Unlock()
|
||||
handle, ok := handleMap[addr]
|
||||
if !ok {
|
||||
// should be impossible; we would've errored above
|
||||
return errors.New("unknown base address")
|
||||
}
|
||||
delete(handleMap, addr)
|
||||
|
||||
e := windows.CloseHandle(windows.Handle(handle))
|
||||
return os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
58
vendor/github.com/oschwald/maxminddb-golang/node.go
generated
vendored
Normal file
58
vendor/github.com/oschwald/maxminddb-golang/node.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
package maxminddb
|
||||
|
||||
// nodeReader reads the left and right records of a search-tree node; there
// is one implementation per supported record size.
type nodeReader interface {
	readLeft(uint) uint
	readRight(uint) uint
}

// nodeReader24 reads 6-byte nodes holding two 24-bit records.
type nodeReader24 struct {
	buffer []byte
}

func (n nodeReader24) readLeft(nodeNumber uint) uint {
	return uint(n.buffer[nodeNumber])<<16 |
		uint(n.buffer[nodeNumber+1])<<8 |
		uint(n.buffer[nodeNumber+2])
}

func (n nodeReader24) readRight(nodeNumber uint) uint {
	return uint(n.buffer[nodeNumber+3])<<16 |
		uint(n.buffer[nodeNumber+4])<<8 |
		uint(n.buffer[nodeNumber+5])
}

// nodeReader28 reads 7-byte nodes holding two 28-bit records; the middle
// byte carries the high nibble of each record.
type nodeReader28 struct {
	buffer []byte
}

func (n nodeReader28) readLeft(nodeNumber uint) uint {
	return (uint(n.buffer[nodeNumber+3])&0xF0)<<20 |
		uint(n.buffer[nodeNumber])<<16 |
		uint(n.buffer[nodeNumber+1])<<8 |
		uint(n.buffer[nodeNumber+2])
}

func (n nodeReader28) readRight(nodeNumber uint) uint {
	return (uint(n.buffer[nodeNumber+3])&0x0F)<<24 |
		uint(n.buffer[nodeNumber+4])<<16 |
		uint(n.buffer[nodeNumber+5])<<8 |
		uint(n.buffer[nodeNumber+6])
}

// nodeReader32 reads 8-byte nodes holding two 32-bit records.
type nodeReader32 struct {
	buffer []byte
}

func (n nodeReader32) readLeft(nodeNumber uint) uint {
	return uint(n.buffer[nodeNumber])<<24 |
		uint(n.buffer[nodeNumber+1])<<16 |
		uint(n.buffer[nodeNumber+2])<<8 |
		uint(n.buffer[nodeNumber+3])
}

func (n nodeReader32) readRight(nodeNumber uint) uint {
	return uint(n.buffer[nodeNumber+4])<<24 |
		uint(n.buffer[nodeNumber+5])<<16 |
		uint(n.buffer[nodeNumber+6])<<8 |
		uint(n.buffer[nodeNumber+7])
}
|
||||
310
vendor/github.com/oschwald/maxminddb-golang/reader.go
generated
vendored
Normal file
310
vendor/github.com/oschwald/maxminddb-golang/reader.go
generated
vendored
Normal file
@@ -0,0 +1,310 @@
|
||||
// Package maxminddb provides a reader for the MaxMind DB file format.
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
// NotFound is returned by LookupOffset when a matched root record offset
|
||||
// cannot be found.
|
||||
NotFound = ^uintptr(0)
|
||||
|
||||
dataSectionSeparatorSize = 16
|
||||
)
|
||||
|
||||
var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
|
||||
|
||||
// Reader holds the data corresponding to the MaxMind DB file. Its only public
|
||||
// field is Metadata, which contains the metadata from the MaxMind DB file.
|
||||
//
|
||||
// All of the methods on Reader are thread-safe. The struct may be safely
|
||||
// shared across goroutines.
|
||||
type Reader struct {
|
||||
nodeReader nodeReader
|
||||
buffer []byte
|
||||
decoder decoder
|
||||
Metadata Metadata
|
||||
ipv4Start uint
|
||||
ipv4StartBitDepth int
|
||||
nodeOffsetMult uint
|
||||
hasMappedFile bool
|
||||
}
|
||||
|
||||
// Metadata holds the metadata decoded from the MaxMind DB file. In particular
|
||||
// it has the format version, the build time as Unix epoch time, the database
|
||||
// type and description, the IP version supported, and a slice of the natural
|
||||
// languages included.
|
||||
type Metadata struct {
|
||||
Description map[string]string `maxminddb:"description"`
|
||||
DatabaseType string `maxminddb:"database_type"`
|
||||
Languages []string `maxminddb:"languages"`
|
||||
BinaryFormatMajorVersion uint `maxminddb:"binary_format_major_version"`
|
||||
BinaryFormatMinorVersion uint `maxminddb:"binary_format_minor_version"`
|
||||
BuildEpoch uint `maxminddb:"build_epoch"`
|
||||
IPVersion uint `maxminddb:"ip_version"`
|
||||
NodeCount uint `maxminddb:"node_count"`
|
||||
RecordSize uint `maxminddb:"record_size"`
|
||||
}
|
||||
|
||||
// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
|
||||
// a Reader structure or an error.
|
||||
func FromBytes(buffer []byte) (*Reader, error) {
|
||||
metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
|
||||
|
||||
if metadataStart == -1 {
|
||||
return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
|
||||
}
|
||||
|
||||
metadataStart += len(metadataStartMarker)
|
||||
metadataDecoder := decoder{buffer[metadataStart:]}
|
||||
|
||||
var metadata Metadata
|
||||
|
||||
rvMetadata := reflect.ValueOf(&metadata)
|
||||
_, err := metadataDecoder.decode(0, rvMetadata, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
|
||||
dataSectionStart := searchTreeSize + dataSectionSeparatorSize
|
||||
dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
|
||||
if dataSectionStart > dataSectionEnd {
|
||||
return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
|
||||
}
|
||||
d := decoder{
|
||||
buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
|
||||
}
|
||||
|
||||
nodeBuffer := buffer[:searchTreeSize]
|
||||
var nodeReader nodeReader
|
||||
switch metadata.RecordSize {
|
||||
case 24:
|
||||
nodeReader = nodeReader24{buffer: nodeBuffer}
|
||||
case 28:
|
||||
nodeReader = nodeReader28{buffer: nodeBuffer}
|
||||
case 32:
|
||||
nodeReader = nodeReader32{buffer: nodeBuffer}
|
||||
default:
|
||||
return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize)
|
||||
}
|
||||
|
||||
reader := &Reader{
|
||||
buffer: buffer,
|
||||
nodeReader: nodeReader,
|
||||
decoder: d,
|
||||
Metadata: metadata,
|
||||
ipv4Start: 0,
|
||||
nodeOffsetMult: metadata.RecordSize / 4,
|
||||
}
|
||||
|
||||
reader.setIPv4Start()
|
||||
|
||||
return reader, err
|
||||
}
|
||||
|
||||
func (r *Reader) setIPv4Start() {
|
||||
if r.Metadata.IPVersion != 6 {
|
||||
return
|
||||
}
|
||||
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
|
||||
node := uint(0)
|
||||
i := 0
|
||||
for ; i < 96 && node < nodeCount; i++ {
|
||||
node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
|
||||
}
|
||||
r.ipv4Start = node
|
||||
r.ipv4StartBitDepth = i
|
||||
}
|
||||
|
||||
// Lookup retrieves the database record for ip and stores it in the value
|
||||
// pointed to by result. If result is nil or not a pointer, an error is
|
||||
// returned. If the data in the database record cannot be stored in result
|
||||
// because of type differences, an UnmarshalTypeError is returned. If the
|
||||
// database is invalid or otherwise cannot be read, an InvalidDatabaseError
|
||||
// is returned.
|
||||
func (r *Reader) Lookup(ip net.IP, result any) error {
|
||||
if r.buffer == nil {
|
||||
return errors.New("cannot call Lookup on a closed database")
|
||||
}
|
||||
pointer, _, _, err := r.lookupPointer(ip)
|
||||
if pointer == 0 || err != nil {
|
||||
return err
|
||||
}
|
||||
return r.retrieveData(pointer, result)
|
||||
}
|
||||
|
||||
// LookupNetwork retrieves the database record for ip and stores it in the
|
||||
// value pointed to by result. The network returned is the network associated
|
||||
// with the data record in the database. The ok return value indicates whether
|
||||
// the database contained a record for the ip.
|
||||
//
|
||||
// If result is nil or not a pointer, an error is returned. If the data in the
|
||||
// database record cannot be stored in result because of type differences, an
|
||||
// UnmarshalTypeError is returned. If the database is invalid or otherwise
|
||||
// cannot be read, an InvalidDatabaseError is returned.
|
||||
func (r *Reader) LookupNetwork(
|
||||
ip net.IP,
|
||||
result any,
|
||||
) (network *net.IPNet, ok bool, err error) {
|
||||
if r.buffer == nil {
|
||||
return nil, false, errors.New("cannot call Lookup on a closed database")
|
||||
}
|
||||
pointer, prefixLength, ip, err := r.lookupPointer(ip)
|
||||
|
||||
network = r.cidr(ip, prefixLength)
|
||||
if pointer == 0 || err != nil {
|
||||
return network, false, err
|
||||
}
|
||||
|
||||
return network, true, r.retrieveData(pointer, result)
|
||||
}
|
||||
|
||||
// LookupOffset maps an argument net.IP to a corresponding record offset in the
|
||||
// database. NotFound is returned if no such record is found, and a record may
|
||||
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
|
||||
// is an advanced API, which exists to provide clients with a means to cache
|
||||
// previously-decoded records.
|
||||
func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) {
|
||||
if r.buffer == nil {
|
||||
return 0, errors.New("cannot call LookupOffset on a closed database")
|
||||
}
|
||||
pointer, _, _, err := r.lookupPointer(ip)
|
||||
if pointer == 0 || err != nil {
|
||||
return NotFound, err
|
||||
}
|
||||
return r.resolveDataPointer(pointer)
|
||||
}
|
||||
|
||||
func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
|
||||
// This is necessary as the node that the IPv4 start is at may
|
||||
// be at a bit depth that is less that 96, i.e., ipv4Start points
|
||||
// to a leaf node. For instance, if a record was inserted at ::/8,
|
||||
// the ipv4Start would point directly at the leaf node for the
|
||||
// record and would have a bit depth of 8. This would not happen
|
||||
// with databases currently distributed by MaxMind as all of them
|
||||
// have an IPv4 subtree that is greater than a single node.
|
||||
if r.Metadata.IPVersion == 6 &&
|
||||
len(ip) == net.IPv4len &&
|
||||
r.ipv4StartBitDepth != 96 {
|
||||
return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
|
||||
}
|
||||
|
||||
mask := net.CIDRMask(prefixLength, len(ip)*8)
|
||||
return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
|
||||
}
|
||||
|
||||
// Decode the record at |offset| into |result|. The result value pointed to
|
||||
// must be a data value that corresponds to a record in the database. This may
|
||||
// include a struct representation of the data, a map capable of holding the
|
||||
// data or an empty any value.
|
||||
//
|
||||
// If result is a pointer to a struct, the struct need not include a field
|
||||
// for every value that may be in the database. If a field is not present in
|
||||
// the structure, the decoder will not decode that field, reducing the time
|
||||
// required to decode the record.
|
||||
//
|
||||
// As a special case, a struct field of type uintptr will be used to capture
|
||||
// the offset of the value. Decode may later be used to extract the stored
|
||||
// value from the offset. MaxMind DBs are highly normalized: for example in
|
||||
// the City database, all records of the same country will reference a
|
||||
// single representative record for that country. This uintptr behavior allows
|
||||
// clients to leverage this normalization in their own sub-record caching.
|
||||
func (r *Reader) Decode(offset uintptr, result any) error {
|
||||
if r.buffer == nil {
|
||||
return errors.New("cannot call Decode on a closed database")
|
||||
}
|
||||
return r.decode(offset, result)
|
||||
}
|
||||
|
||||
func (r *Reader) decode(offset uintptr, result any) error {
|
||||
rv := reflect.ValueOf(result)
|
||||
if rv.Kind() != reflect.Ptr || rv.IsNil() {
|
||||
return errors.New("result param must be a pointer")
|
||||
}
|
||||
|
||||
if dser, ok := result.(deserializer); ok {
|
||||
_, err := r.decoder.decodeToDeserializer(uint(offset), dser, 0, false)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err := r.decoder.decode(uint(offset), rv, 0)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
|
||||
if ip == nil {
|
||||
return 0, 0, nil, errors.New("IP passed to Lookup cannot be nil")
|
||||
}
|
||||
|
||||
ipV4Address := ip.To4()
|
||||
if ipV4Address != nil {
|
||||
ip = ipV4Address
|
||||
}
|
||||
if len(ip) == 16 && r.Metadata.IPVersion == 4 {
|
||||
return 0, 0, ip, fmt.Errorf(
|
||||
"error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database",
|
||||
ip.String(),
|
||||
)
|
||||
}
|
||||
|
||||
bitCount := uint(len(ip) * 8)
|
||||
|
||||
var node uint
|
||||
if bitCount == 32 {
|
||||
node = r.ipv4Start
|
||||
}
|
||||
node, prefixLength := r.traverseTree(ip, node, bitCount)
|
||||
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
if node == nodeCount {
|
||||
// Record is empty
|
||||
return 0, prefixLength, ip, nil
|
||||
} else if node > nodeCount {
|
||||
return node, prefixLength, ip, nil
|
||||
}
|
||||
|
||||
return 0, prefixLength, ip, newInvalidDatabaseError("invalid node in search tree")
|
||||
}
|
||||
|
||||
func (r *Reader) traverseTree(ip net.IP, node, bitCount uint) (uint, int) {
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
|
||||
i := uint(0)
|
||||
for ; i < bitCount && node < nodeCount; i++ {
|
||||
bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))
|
||||
|
||||
offset := node * r.nodeOffsetMult
|
||||
if bit == 0 {
|
||||
node = r.nodeReader.readLeft(offset)
|
||||
} else {
|
||||
node = r.nodeReader.readRight(offset)
|
||||
}
|
||||
}
|
||||
|
||||
return node, int(i)
|
||||
}
|
||||
|
||||
func (r *Reader) retrieveData(pointer uint, result any) error {
|
||||
offset, err := r.resolveDataPointer(pointer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return r.decode(offset, result)
|
||||
}
|
||||
|
||||
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
|
||||
resolved := uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)
|
||||
|
||||
if resolved >= uintptr(len(r.buffer)) {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
|
||||
}
|
||||
return resolved, nil
|
||||
}
|
||||
26
vendor/github.com/oschwald/maxminddb-golang/reader_memory.go
generated
vendored
Normal file
26
vendor/github.com/oschwald/maxminddb-golang/reader_memory.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
//go:build appengine || plan9 || js || wasip1 || wasi
|
||||
// +build appengine plan9 js wasip1 wasi
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "io/ioutil"
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map
|
||||
// on supported platforms. On platforms without memory map support, such
|
||||
// as WebAssembly or Google App Engine, the database is loaded into memory.
|
||||
// Use the Close method on the Reader object to return the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
bytes, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return FromBytes(bytes)
|
||||
}
|
||||
|
||||
// Close returns the resources used by the database to the system.
|
||||
func (r *Reader) Close() error {
|
||||
r.buffer = nil
|
||||
return nil
|
||||
}
|
||||
64
vendor/github.com/oschwald/maxminddb-golang/reader_mmap.go
generated
vendored
Normal file
64
vendor/github.com/oschwald/maxminddb-golang/reader_mmap.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
//go:build !appengine && !plan9 && !js && !wasip1 && !wasi
|
||||
// +build !appengine,!plan9,!js,!wasip1,!wasi
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map
|
||||
// on supported platforms. On platforms without memory map support, such
|
||||
// as WebAssembly or Google App Engine, the database is loaded into memory.
|
||||
// Use the Close method on the Reader object to return the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
mapFile, err := os.Open(file)
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats, err := mapFile.Stat()
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSize := int(stats.Size())
|
||||
mmap, err := mmap(int(mapFile.Fd()), fileSize)
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mapFile.Close(); err != nil {
|
||||
//nolint:errcheck // we prefer to return the original error
|
||||
munmap(mmap)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader, err := FromBytes(mmap)
|
||||
if err != nil {
|
||||
//nolint:errcheck // we prefer to return the original error
|
||||
munmap(mmap)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader.hasMappedFile = true
|
||||
runtime.SetFinalizer(reader, (*Reader).Close)
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
// Close returns the resources used by the database to the system.
|
||||
func (r *Reader) Close() error {
|
||||
var err error
|
||||
if r.hasMappedFile {
|
||||
runtime.SetFinalizer(r, nil)
|
||||
r.hasMappedFile = false
|
||||
err = munmap(r.buffer)
|
||||
}
|
||||
r.buffer = nil
|
||||
return err
|
||||
}
|
||||
10
vendor/github.com/oschwald/maxminddb-golang/set_zero_120.go
generated
vendored
Normal file
10
vendor/github.com/oschwald/maxminddb-golang/set_zero_120.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
//go:build go1.20
|
||||
// +build go1.20
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "reflect"
|
||||
|
||||
func reflectSetZero(v reflect.Value) {
|
||||
v.SetZero()
|
||||
}
|
||||
10
vendor/github.com/oschwald/maxminddb-golang/set_zero_pre120.go
generated
vendored
Normal file
10
vendor/github.com/oschwald/maxminddb-golang/set_zero_pre120.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
//go:build !go1.20
|
||||
// +build !go1.20
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "reflect"
|
||||
|
||||
func reflectSetZero(v reflect.Value) {
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
}
|
||||
211
vendor/github.com/oschwald/maxminddb-golang/traverse.go
generated
vendored
Normal file
211
vendor/github.com/oschwald/maxminddb-golang/traverse.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// Internal structure used to keep track of nodes we still need to visit.
|
||||
type netNode struct {
|
||||
ip net.IP
|
||||
bit uint
|
||||
pointer uint
|
||||
}
|
||||
|
||||
// Networks represents a set of subnets that we are iterating over.
|
||||
type Networks struct {
|
||||
err error
|
||||
reader *Reader
|
||||
nodes []netNode
|
||||
lastNode netNode
|
||||
skipAliasedNetworks bool
|
||||
}
|
||||
|
||||
var (
|
||||
allIPv4 = &net.IPNet{IP: make(net.IP, 4), Mask: net.CIDRMask(0, 32)}
|
||||
allIPv6 = &net.IPNet{IP: make(net.IP, 16), Mask: net.CIDRMask(0, 128)}
|
||||
)
|
||||
|
||||
// NetworksOption are options for Networks and NetworksWithin.
|
||||
type NetworksOption func(*Networks)
|
||||
|
||||
// SkipAliasedNetworks is an option for Networks and NetworksWithin that
|
||||
// makes them not iterate over aliases of the IPv4 subtree in an IPv6
|
||||
// database, e.g., ::ffff:0:0/96, 2001::/32, and 2002::/16.
|
||||
//
|
||||
// You most likely want to set this. The only reason it isn't the default
|
||||
// behavior is to provide backwards compatibility to existing users.
|
||||
func SkipAliasedNetworks(networks *Networks) {
|
||||
networks.skipAliasedNetworks = true
|
||||
}
|
||||
|
||||
// Networks returns an iterator that can be used to traverse all networks in
|
||||
// the database.
|
||||
//
|
||||
// Please note that a MaxMind DB may map IPv4 networks into several locations
|
||||
// in an IPv6 database. This iterator will iterate over all of these locations
|
||||
// separately. To only iterate over the IPv4 networks once, use the
|
||||
// SkipAliasedNetworks option.
|
||||
func (r *Reader) Networks(options ...NetworksOption) *Networks {
|
||||
var networks *Networks
|
||||
if r.Metadata.IPVersion == 6 {
|
||||
networks = r.NetworksWithin(allIPv6, options...)
|
||||
} else {
|
||||
networks = r.NetworksWithin(allIPv4, options...)
|
||||
}
|
||||
|
||||
return networks
|
||||
}
|
||||
|
||||
// NetworksWithin returns an iterator that can be used to traverse all networks
|
||||
// in the database which are contained in a given network.
|
||||
//
|
||||
// Please note that a MaxMind DB may map IPv4 networks into several locations
|
||||
// in an IPv6 database. This iterator will iterate over all of these locations
|
||||
// separately. To only iterate over the IPv4 networks once, use the
|
||||
// SkipAliasedNetworks option.
|
||||
//
|
||||
// If the provided network is contained within a network in the database, the
|
||||
// iterator will iterate over exactly one network, the containing network.
|
||||
func (r *Reader) NetworksWithin(network *net.IPNet, options ...NetworksOption) *Networks {
|
||||
if r.Metadata.IPVersion == 4 && network.IP.To4() == nil {
|
||||
return &Networks{
|
||||
err: fmt.Errorf(
|
||||
"error getting networks with '%s': you attempted to use an IPv6 network in an IPv4-only database",
|
||||
network.String(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
networks := &Networks{reader: r}
|
||||
for _, option := range options {
|
||||
option(networks)
|
||||
}
|
||||
|
||||
ip := network.IP
|
||||
prefixLength, _ := network.Mask.Size()
|
||||
|
||||
if r.Metadata.IPVersion == 6 && len(ip) == net.IPv4len {
|
||||
if networks.skipAliasedNetworks {
|
||||
ip = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ip[0], ip[1], ip[2], ip[3]}
|
||||
} else {
|
||||
ip = ip.To16()
|
||||
}
|
||||
prefixLength += 96
|
||||
}
|
||||
|
||||
pointer, bit := r.traverseTree(ip, 0, uint(prefixLength))
|
||||
|
||||
// We could skip this when bit >= prefixLength if we assume that the network
|
||||
// passed in is in canonical form. However, given that this may not be the
|
||||
// case, it is safest to always take the mask. If this is hot code at some
|
||||
// point, we could eliminate the allocation of the net.IPMask by zeroing
|
||||
// out the bits in ip directly.
|
||||
ip = ip.Mask(net.CIDRMask(bit, len(ip)*8))
|
||||
networks.nodes = []netNode{
|
||||
{
|
||||
ip: ip,
|
||||
bit: uint(bit),
|
||||
pointer: pointer,
|
||||
},
|
||||
}
|
||||
|
||||
return networks
|
||||
}
|
||||
|
||||
// Next prepares the next network for reading with the Network method. It
// returns true if there is another network to be processed and false if there
// are no more networks or if there is an error.
func (n *Networks) Next() bool {
	if n.err != nil {
		return false
	}
	// Depth-first traversal of the search tree using the explicit stack in
	// n.nodes: each popped node is followed down its left edges, and every
	// right child encountered along the way is pushed for a later visit.
	for len(n.nodes) > 0 {
		node := n.nodes[len(n.nodes)-1]
		n.nodes = n.nodes[:len(n.nodes)-1]

		// A pointer equal to NodeCount marks an empty (non-existent) branch.
		for node.pointer != n.reader.Metadata.NodeCount {
			// This skips IPv4 aliases without hardcoding the networks that the writer
			// currently aliases.
			if n.skipAliasedNetworks && n.reader.ipv4Start != 0 &&
				node.pointer == n.reader.ipv4Start && !isInIPv4Subtree(node.ip) {
				break
			}

			// A pointer past NodeCount is a data record: we have reached a
			// network. Remember it so Network can decode it.
			if node.pointer > n.reader.Metadata.NodeCount {
				n.lastNode = node
				return true
			}
			// Derive the right child's IP by setting the bit at the current
			// depth in a copy of this node's IP.
			ipRight := make(net.IP, len(node.ip))
			copy(ipRight, node.ip)
			if len(ipRight) <= int(node.bit>>3) {
				n.err = newInvalidDatabaseError(
					"invalid search tree at %v/%v", ipRight, node.bit)
				return false
			}
			ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))

			offset := node.pointer * n.reader.nodeOffsetMult
			rightPointer := n.reader.nodeReader.readRight(offset)

			// Push the right child for later, then descend the left edge.
			node.bit++
			n.nodes = append(n.nodes, netNode{
				pointer: rightPointer,
				ip:      ipRight,
				bit:     node.bit,
			})

			node.pointer = n.reader.nodeReader.readLeft(offset)
		}
	}

	return false
}
|
||||
|
||||
// Network returns the current network or an error if there is a problem
|
||||
// decoding the data for the network. It takes a pointer to a result value to
|
||||
// decode the network's data into.
|
||||
func (n *Networks) Network(result any) (*net.IPNet, error) {
|
||||
if n.err != nil {
|
||||
return nil, n.err
|
||||
}
|
||||
if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ip := n.lastNode.ip
|
||||
prefixLength := int(n.lastNode.bit)
|
||||
|
||||
// We do this because uses of SkipAliasedNetworks expect the IPv4 networks
|
||||
// to be returned as IPv4 networks. If we are not skipping aliased
|
||||
// networks, then the user will get IPv4 networks from the ::FFFF:0:0/96
|
||||
// network as Go automatically converts those.
|
||||
if n.skipAliasedNetworks && isInIPv4Subtree(ip) {
|
||||
ip = ip[12:]
|
||||
prefixLength -= 96
|
||||
}
|
||||
|
||||
return &net.IPNet{
|
||||
IP: ip,
|
||||
Mask: net.CIDRMask(prefixLength, len(ip)*8),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Err returns an error, if any, that was encountered during iteration. It
// should be checked after Next returns false to distinguish normal
// exhaustion of the networks from a failure.
func (n *Networks) Err() error {
	return n.err
}
|
||||
|
||||
// isInIPv4Subtree returns true if the IP is an IPv6 address in the database's
|
||||
// IPv4 subtree.
|
||||
func isInIPv4Subtree(ip net.IP) bool {
|
||||
if len(ip) != 16 {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < 12; i++ {
|
||||
if ip[i] != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
201
vendor/github.com/oschwald/maxminddb-golang/verifier.go
generated
vendored
Normal file
201
vendor/github.com/oschwald/maxminddb-golang/verifier.go
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// verifier wraps a Reader so the validity checks run by Reader.Verify can
// share access to it.
type verifier struct {
	reader *Reader
}
|
||||
|
||||
// Verify checks that the database is valid. It validates the search tree,
|
||||
// the data section, and the metadata section. This verifier is stricter than
|
||||
// the specification and may return errors on databases that are readable.
|
||||
func (r *Reader) Verify() error {
|
||||
v := verifier{r}
|
||||
if err := v.verifyMetadata(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := v.verifyDatabase()
|
||||
runtime.KeepAlive(v.reader)
|
||||
return err
|
||||
}
|
||||
|
||||
func (v *verifier) verifyMetadata() error {
|
||||
metadata := v.reader.Metadata
|
||||
|
||||
if metadata.BinaryFormatMajorVersion != 2 {
|
||||
return testError(
|
||||
"binary_format_major_version",
|
||||
2,
|
||||
metadata.BinaryFormatMajorVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.BinaryFormatMinorVersion != 0 {
|
||||
return testError(
|
||||
"binary_format_minor_version",
|
||||
0,
|
||||
metadata.BinaryFormatMinorVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.DatabaseType == "" {
|
||||
return testError(
|
||||
"database_type",
|
||||
"non-empty string",
|
||||
metadata.DatabaseType,
|
||||
)
|
||||
}
|
||||
|
||||
if len(metadata.Description) == 0 {
|
||||
return testError(
|
||||
"description",
|
||||
"non-empty slice",
|
||||
metadata.Description,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
|
||||
return testError(
|
||||
"ip_version",
|
||||
"4 or 6",
|
||||
metadata.IPVersion,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.RecordSize != 24 &&
|
||||
metadata.RecordSize != 28 &&
|
||||
metadata.RecordSize != 32 {
|
||||
return testError(
|
||||
"record_size",
|
||||
"24, 28, or 32",
|
||||
metadata.RecordSize,
|
||||
)
|
||||
}
|
||||
|
||||
if metadata.NodeCount == 0 {
|
||||
return testError(
|
||||
"node_count",
|
||||
"positive integer",
|
||||
metadata.NodeCount,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDatabase() error {
|
||||
offsets, err := v.verifySearchTree()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := v.verifyDataSectionSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return v.verifyDataSection(offsets)
|
||||
}
|
||||
|
||||
func (v *verifier) verifySearchTree() (map[uint]bool, error) {
|
||||
offsets := make(map[uint]bool)
|
||||
|
||||
it := v.reader.Networks()
|
||||
for it.Next() {
|
||||
offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offsets[uint(offset)] = true
|
||||
}
|
||||
if err := it.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return offsets, nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDataSectionSeparator() error {
|
||||
separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
|
||||
|
||||
separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
|
||||
|
||||
for _, b := range separator {
|
||||
if b != 0 {
|
||||
return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
|
||||
pointerCount := len(offsets)
|
||||
|
||||
decoder := v.reader.decoder
|
||||
|
||||
var offset uint
|
||||
bufferLen := uint(len(decoder.buffer))
|
||||
for offset < bufferLen {
|
||||
var data any
|
||||
rv := reflect.ValueOf(&data)
|
||||
newOffset, err := decoder.decode(offset, rv, 0)
|
||||
if err != nil {
|
||||
return newInvalidDatabaseError(
|
||||
"received decoding error (%v) at offset of %v",
|
||||
err,
|
||||
offset,
|
||||
)
|
||||
}
|
||||
if newOffset <= offset {
|
||||
return newInvalidDatabaseError(
|
||||
"data section offset unexpectedly went from %v to %v",
|
||||
offset,
|
||||
newOffset,
|
||||
)
|
||||
}
|
||||
|
||||
pointer := offset
|
||||
|
||||
if _, ok := offsets[pointer]; !ok {
|
||||
return newInvalidDatabaseError(
|
||||
"found data (%v) at %v that the search tree does not point to",
|
||||
data,
|
||||
pointer,
|
||||
)
|
||||
}
|
||||
delete(offsets, pointer)
|
||||
|
||||
offset = newOffset
|
||||
}
|
||||
|
||||
if offset != bufferLen {
|
||||
return newInvalidDatabaseError(
|
||||
"unexpected data at the end of the data section (last offset: %v, end: %v)",
|
||||
offset,
|
||||
bufferLen,
|
||||
)
|
||||
}
|
||||
|
||||
if len(offsets) != 0 {
|
||||
return newInvalidDatabaseError(
|
||||
"found %v pointers (of %v) in the search tree that we did not see in the data section",
|
||||
len(offsets),
|
||||
pointerCount,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testError(
|
||||
field string,
|
||||
expected any,
|
||||
actual any,
|
||||
) error {
|
||||
return newInvalidDatabaseError(
|
||||
"%v - Expected: %v Actual: %v",
|
||||
field,
|
||||
expected,
|
||||
actual,
|
||||
)
|
||||
}
|
||||
Reference in New Issue
Block a user