Switch to Go modules, update dependencies
vendor/github.com/oschwald/maxminddb-golang/.gitignore | 4 (generated, vendored, new file)
@@ -0,0 +1,4 @@
.vscode
*.out
*.sw?
*.test
vendor/github.com/oschwald/maxminddb-golang/.gitmodules | 3 (generated, vendored, new file)
@@ -0,0 +1,3 @@
[submodule "test-data"]
path = test-data
url = https://github.com/maxmind/MaxMind-DB.git
vendor/github.com/oschwald/maxminddb-golang/.golangci.toml | 30 (generated, vendored, new file)
@@ -0,0 +1,30 @@
[run]
deadline = "10m"
tests = true

[linters]
disable-all = true
enable = [
"deadcode",
"depguard",
"errcheck",
"goconst",
"gocyclo",
"gocritic",
"gofmt",
"golint",
"gosec",
"gosimple",
"ineffassign",
"maligned",
"misspell",
"nakedret",
"staticcheck",
"structcheck",
"typecheck",
"unconvert",
"unparam",
"varcheck",
"vet",
"vetshadow",
]
vendor/github.com/oschwald/maxminddb-golang/.travis.yml | 48 (generated, vendored, new file)
@@ -0,0 +1,48 @@
language: go

go:
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
- 1.14.x
- tip

os:
- linux
- linux-ppc64le
- osx

matrix:
allow_failures:
- go: tip

install:
- go get -v -t ./...

before_script:
- |
if [[ $TRAVIS_GO_VERSION == 1.14 && $(arch) != 'ppc64le' ]]; then
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin
fi

script:
- |
if [ $(arch) == "ppc64le" ]; then
go test -cpu 1,4 -v
else
go test -race -cpu 1,4 -v
fi
- |
if [ $(arch) == "ppc64le" ]; then
go test -v -tags appengine
else
go test -race -v -tags appengine
fi
- |
if [[ $TRAVIS_GO_VERSION == 1.14 && $(arch) != 'ppc64le' ]]; then
golangci-lint run
fi

sudo: false
vendor/github.com/oschwald/maxminddb-golang/README.md | 38 (generated, vendored, new file)
@@ -0,0 +1,38 @@
# MaxMind DB Reader for Go #

[](https://travis-ci.org/oschwald/maxminddb-golang)
[](https://ci.appveyor.com/project/oschwald/maxminddb-golang/branch/master)
[](https://godoc.org/github.com/oschwald/maxminddb-golang)

This is a Go reader for the MaxMind DB format. Although this can be used to
read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
API for doing so.

This is not an official MaxMind API.

## Installation ##

```
go get github.com/oschwald/maxminddb-golang
```

## Usage ##

[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
documentation and examples.

## Examples ##

See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
`example_test.go` for examples.

## Contributing ##

Contributions welcome! Please fork the repository and open a pull request
with your changes.

## License ##

This is free software, licensed under the ISC License.
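The README above defers to GoDoc for usage. As a convenience, here is a minimal lookup sketch against the API visible in this vendored copy (Open, Lookup, Close, and `maxminddb` struct tags). The database filename and the country/iso_code record layout are assumptions; they depend on which MaxMind DB file you load.

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/oschwald/maxminddb-golang"
)

func main() {
	// Hypothetical database path; any file in the MaxMind DB format works.
	db, err := maxminddb.Open("GeoLite2-Country.mmdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Fields are matched via the maxminddb struct tags handled by decoder.go.
	var record struct {
		Country struct {
			ISOCode string `maxminddb:"iso_code"`
		} `maxminddb:"country"`
	}

	if err := db.Lookup(net.ParseIP("81.2.69.142"), &record); err != nil {
		log.Fatal(err)
	}
	fmt.Println(record.Country.ISOCode)
}
```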
vendor/github.com/oschwald/maxminddb-golang/appveyor.yml | 19 (generated, vendored, new file)
@@ -0,0 +1,19 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\oschwald\maxminddb-golang

environment:
GOPATH: c:\gopath

install:
- echo %PATH%
- echo %GOPATH%
- git submodule update --init --recursive
- go version
- go env
- go get -v -t ./...

build_script:
- go test -v ./...
vendor/github.com/oschwald/maxminddb-golang/decoder.go | 180 (generated, vendored)
@@ -27,8 +27,10 @@ const (
_Uint64
_Uint128
_Slice
_Container
_Marker
// We don't use the next two. They are placeholders. See the spec
// for more details.
_Container // nolint: deadcode, varcheck
_Marker // nolint: deadcode, varcheck
_Bool
_Float32
)
@@ -159,10 +161,8 @@ func (d *decoder) unmarshalBool(size uint, offset uint, result reflect.Value) (u
if size > 1 {
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (bool size of %v)", size)
}
value, newOffset, err := d.decodeBool(size, offset)
if err != nil {
return 0, err
}
value, newOffset := d.decodeBool(size, offset)

switch result.Kind() {
case reflect.Bool:
result.SetBool(value)
@@ -207,10 +207,8 @@ func (d *decoder) indirect(result reflect.Value) reflect.Value {
var sliceType = reflect.TypeOf([]byte{})

func (d *decoder) unmarshalBytes(size uint, offset uint, result reflect.Value) (uint, error) {
value, newOffset, err := d.decodeBytes(size, offset)
if err != nil {
return 0, err
}
value, newOffset := d.decodeBytes(size, offset)

switch result.Kind() {
case reflect.Slice:
if result.Type() == sliceType {
@@ -230,10 +228,7 @@ func (d *decoder) unmarshalFloat32(size uint, offset uint, result reflect.Value)
if size != 4 {
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float32 size of %v)", size)
}
value, newOffset, err := d.decodeFloat32(size, offset)
if err != nil {
return 0, err
}
value, newOffset := d.decodeFloat32(size, offset)

switch result.Kind() {
case reflect.Float32, reflect.Float64:
@@ -253,10 +248,8 @@ func (d *decoder) unmarshalFloat64(size uint, offset uint, result reflect.Value)
if size != 8 {
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of %v)", size)
}
value, newOffset, err := d.decodeFloat64(size, offset)
if err != nil {
return 0, err
}
value, newOffset := d.decodeFloat64(size, offset)

switch result.Kind() {
case reflect.Float32, reflect.Float64:
if result.OverflowFloat(value) {
@@ -277,10 +270,7 @@ func (d *decoder) unmarshalInt32(size uint, offset uint, result reflect.Value) (
if size > 4 {
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (int32 size of %v)", size)
}
value, newOffset, err := d.decodeInt(size, offset)
if err != nil {
return 0, err
}
value, newOffset := d.decodeInt(size, offset)

switch result.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -360,11 +350,8 @@ func (d *decoder) unmarshalSlice(
}

func (d *decoder) unmarshalString(size uint, offset uint, result reflect.Value) (uint, error) {
value, newOffset, err := d.decodeString(size, offset)
value, newOffset := d.decodeString(size, offset)

if err != nil {
return 0, err
}
switch result.Kind() {
case reflect.String:
result.SetString(value)
@@ -384,10 +371,7 @@ func (d *decoder) unmarshalUint(size uint, offset uint, result reflect.Value, ui
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
}

value, newOffset, err := d.decodeUint(size, offset)
if err != nil {
return 0, err
}
value, newOffset := d.decodeUint(size, offset)

switch result.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -416,10 +400,7 @@ func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value)
if size > 16 {
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint128 size of %v)", size)
}
value, newOffset, err := d.decodeUint128(size, offset)
if err != nil {
return 0, err
}
value, newOffset := d.decodeUint128(size, offset)

switch result.Kind() {
case reflect.Struct:
@@ -436,36 +417,36 @@ func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value)
return newOffset, newUnmarshalTypeError(value, result.Type())
}

func (d *decoder) decodeBool(size uint, offset uint) (bool, uint, error) {
return size != 0, offset, nil
func (d *decoder) decodeBool(size uint, offset uint) (bool, uint) {
return size != 0, offset
}

func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint, error) {
func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint) {
newOffset := offset + size
bytes := make([]byte, size)
copy(bytes, d.buffer[offset:newOffset])
return bytes, newOffset, nil
return bytes, newOffset
}

func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint, error) {
func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint) {
newOffset := offset + size
bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
return math.Float64frombits(bits), newOffset, nil
return math.Float64frombits(bits), newOffset
}

func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint, error) {
func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint) {
newOffset := offset + size
bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
return math.Float32frombits(bits), newOffset, nil
return math.Float32frombits(bits), newOffset
}

func (d *decoder) decodeInt(size uint, offset uint) (int, uint, error) {
func (d *decoder) decodeInt(size uint, offset uint) (int, uint) {
newOffset := offset + size
var val int32
for _, b := range d.buffer[offset:newOffset] {
val = (val << 8) | int32(b)
}
return int(val), newOffset, nil
return int(val), newOffset
}

func (d *decoder) decodeMap(
@@ -475,9 +456,14 @@ func (d *decoder) decodeMap(
depth int,
) (uint, error) {
if result.IsNil() {
result.Set(reflect.MakeMap(result.Type()))
result.Set(reflect.MakeMapWithSize(result.Type(), int(size)))
}

mapType := result.Type()
keyValue := reflect.New(mapType.Key()).Elem()
elemType := mapType.Elem()
elemKind := elemType.Kind()
var elemValue reflect.Value
for i := uint(0); i < size; i++ {
var key []byte
var err error
@@ -487,12 +473,17 @@ func (d *decoder) decodeMap(
return 0, err
}

value := reflect.New(result.Type().Elem())
offset, err = d.decode(offset, value, depth)
if !elemValue.IsValid() || elemKind == reflect.Interface {
elemValue = reflect.New(elemType).Elem()
}

offset, err = d.decode(offset, elemValue, depth)
if err != nil {
return 0, err
}
result.SetMapIndex(reflect.ValueOf(string(key)), value.Elem())

keyValue.SetString(string(key))
result.SetMapIndex(keyValue, elemValue)
}
return offset, nil
}
@@ -511,7 +502,7 @@ func (d *decoder) decodePointer(
if pointerSize == 4 {
prefix = 0
} else {
prefix = uint(size & 0x7)
prefix = size & 0x7
}
unpacked := uintFromBytes(prefix, pointerBytes)

@@ -549,57 +540,18 @@ func (d *decoder) decodeSlice(
return offset, nil
}

func (d *decoder) decodeString(size uint, offset uint) (string, uint, error) {
func (d *decoder) decodeString(size uint, offset uint) (string, uint) {
newOffset := offset + size
return string(d.buffer[offset:newOffset]), newOffset, nil
return string(d.buffer[offset:newOffset]), newOffset
}

type fieldsType struct {
namedFields map[string]int
anonymousFields []int
}

var (
fieldMap = map[reflect.Type]*fieldsType{}
fieldMapMu sync.RWMutex
)

func (d *decoder) decodeStruct(
size uint,
offset uint,
result reflect.Value,
depth int,
) (uint, error) {
resultType := result.Type()

fieldMapMu.RLock()
fields, ok := fieldMap[resultType]
fieldMapMu.RUnlock()
if !ok {
numFields := resultType.NumField()
namedFields := make(map[string]int, numFields)
var anonymous []int
for i := 0; i < numFields; i++ {
field := resultType.Field(i)

fieldName := field.Name
if tag := field.Tag.Get("maxminddb"); tag != "" {
if tag == "-" {
continue
}
fieldName = tag
}
if field.Anonymous {
anonymous = append(anonymous, i)
continue
}
namedFields[fieldName] = i
}
fieldMapMu.Lock()
fields = &fieldsType{namedFields, anonymous}
fieldMap[resultType] = fields
fieldMapMu.Unlock()
}
fields := cachedFields(result)

// This fills in embedded structs
for _, i := range fields.anonymousFields {
@@ -638,7 +590,45 @@ func (d *decoder) decodeStruct(
return offset, nil
}

func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint, error) {
type fieldsType struct {
namedFields map[string]int
anonymousFields []int
}

var fieldsMap sync.Map

func cachedFields(result reflect.Value) *fieldsType {
resultType := result.Type()

if fields, ok := fieldsMap.Load(resultType); ok {
return fields.(*fieldsType)
}
numFields := resultType.NumField()
namedFields := make(map[string]int, numFields)
var anonymous []int
for i := 0; i < numFields; i++ {
field := resultType.Field(i)

fieldName := field.Name
if tag := field.Tag.Get("maxminddb"); tag != "" {
if tag == "-" {
continue
}
fieldName = tag
}
if field.Anonymous {
anonymous = append(anonymous, i)
continue
}
namedFields[fieldName] = i
}
fields := &fieldsType{namedFields, anonymous}
fieldsMap.Store(resultType, fields)

return fields
}

func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint) {
newOffset := offset + size
bytes := d.buffer[offset:newOffset]

@@ -646,15 +636,15 @@ func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint, error) {
for _, b := range bytes {
val = (val << 8) | uint64(b)
}
return val, newOffset, nil
return val, newOffset
}

func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint, error) {
func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint) {
newOffset := offset + size
val := new(big.Int)
val.SetBytes(d.buffer[offset:newOffset])

return val, newOffset, nil
return val, newOffset
}

func uintFromBytes(prefix uint, uintBytes []byte) uint {
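The struct handling above replaces an RWMutex-guarded map with a sync.Map keyed by reflect.Type (cachedFields). The following standalone sketch restates that caching pattern outside the library; the names tagFields and the demo tag `mmdb` are illustrative only, not part of maxminddb-golang.

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

// fieldCache maps a struct type to its tag-name -> field-index table, so the
// reflection walk happens once per type even under concurrent use.
var fieldCache sync.Map // reflect.Type -> map[string]int

func tagFields(t reflect.Type) map[string]int {
	if cached, ok := fieldCache.Load(t); ok {
		return cached.(map[string]int)
	}
	fields := make(map[string]int, t.NumField())
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		name := f.Name
		if tag := f.Tag.Get("mmdb"); tag != "" {
			if tag == "-" {
				continue // explicitly excluded field
			}
			name = tag
		}
		fields[name] = i
	}
	fieldCache.Store(t, fields)
	return fields
}

type city struct {
	Names   map[string]string `mmdb:"names"`
	Ignored string            `mmdb:"-"`
}

func main() {
	fmt.Println(tagFields(reflect.TypeOf(city{}))) // map[names:0]
}
```

As in the diff, a racing Store for the same type is harmless: both goroutines compute identical tables, so whichever wins is correct.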
vendor/github.com/oschwald/maxminddb-golang/go.mod | 8 (generated, vendored, new file)
@@ -0,0 +1,8 @@
module github.com/oschwald/maxminddb-golang

go 1.9

require (
github.com/stretchr/testify v1.6.1
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76
)
vendor/github.com/oschwald/maxminddb-golang/go.sum | 22 (generated, vendored, new file)
@@ -0,0 +1,22 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw=
github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76 h1:Dho5nD6R3PcW2SH1or8vS0dszDaXRxIw55lBX7XiE5g=
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go | 2 (generated, vendored)
@@ -1,4 +1,4 @@
// +build !windows,!appengine
// +build !windows,!appengine,!plan9

package maxminddb
vendor/github.com/oschwald/maxminddb-golang/node.go | 42 (generated, vendored, new file)
@@ -0,0 +1,42 @@
package maxminddb

type nodeReader interface {
readLeft(uint) uint
readRight(uint) uint
}

type nodeReader24 struct {
buffer []byte
}

func (n nodeReader24) readLeft(nodeNumber uint) uint {
return (uint(n.buffer[nodeNumber]) << 16) | (uint(n.buffer[nodeNumber+1]) << 8) | uint(n.buffer[nodeNumber+2])
}

func (n nodeReader24) readRight(nodeNumber uint) uint {
return (uint(n.buffer[nodeNumber+3]) << 16) | (uint(n.buffer[nodeNumber+4]) << 8) | uint(n.buffer[nodeNumber+5])
}

type nodeReader28 struct {
buffer []byte
}

func (n nodeReader28) readLeft(nodeNumber uint) uint {
return ((uint(n.buffer[nodeNumber+3]) & 0xF0) << 20) | (uint(n.buffer[nodeNumber]) << 16) | (uint(n.buffer[nodeNumber+1]) << 8) | uint(n.buffer[nodeNumber+2])
}

func (n nodeReader28) readRight(nodeNumber uint) uint {
return ((uint(n.buffer[nodeNumber+3]) & 0x0F) << 24) | (uint(n.buffer[nodeNumber+4]) << 16) | (uint(n.buffer[nodeNumber+5]) << 8) | uint(n.buffer[nodeNumber+6])
}

type nodeReader32 struct {
buffer []byte
}

func (n nodeReader32) readLeft(nodeNumber uint) uint {
return (uint(n.buffer[nodeNumber]) << 24) | (uint(n.buffer[nodeNumber+1]) << 16) | (uint(n.buffer[nodeNumber+2]) << 8) | uint(n.buffer[nodeNumber+3])
}

func (n nodeReader32) readRight(nodeNumber uint) uint {
return (uint(n.buffer[nodeNumber+4]) << 24) | (uint(n.buffer[nodeNumber+5]) << 16) | (uint(n.buffer[nodeNumber+6]) << 8) | uint(n.buffer[nodeNumber+7])
}
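node.go packs two record pointers per node, and the 28-bit case splits the shared middle byte: the left record takes the high nibble of byte 3, the right record the low nibble. The helpers below restate that arithmetic for a single 7-byte node as a worked example; readLeft28 and readRight28 are illustrative names, not the library's API.

```go
package main

import "fmt"

// readLeft28 mirrors nodeReader28.readLeft for one 7-byte node.
func readLeft28(node []byte) uint {
	return ((uint(node[3]) & 0xF0) << 20) | (uint(node[0]) << 16) | (uint(node[1]) << 8) | uint(node[2])
}

// readRight28 mirrors nodeReader28.readRight for one 7-byte node.
func readRight28(node []byte) uint {
	return ((uint(node[3]) & 0x0F) << 24) | (uint(node[4]) << 16) | (uint(node[5]) << 8) | uint(node[6])
}

func main() {
	// left = 0x1234567 and right = 0x89abcde packed into seven bytes.
	node := []byte{0x23, 0x45, 0x67, 0x18, 0x9A, 0xBC, 0xDE}
	fmt.Printf("left %#x right %#x\n", readLeft28(node), readRight28(node))
}
```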
vendor/github.com/oschwald/maxminddb-golang/reader.go | 191 (generated, vendored)
@@ -20,16 +20,22 @@ var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")

// Reader holds the data corresponding to the MaxMind DB file. Its only public
// field is Metadata, which contains the metadata from the MaxMind DB file.
//
// All of the methods on Reader are thread-safe. The struct may be safely
// shared across goroutines.
type Reader struct {
hasMappedFile bool
buffer []byte
decoder decoder
Metadata Metadata
ipv4Start uint
hasMappedFile bool
buffer []byte
nodeReader nodeReader
decoder decoder
Metadata Metadata
ipv4Start uint
ipv4StartBitDepth int
nodeOffsetMult uint
}

// Metadata holds the metadata decoded from the MaxMind DB file. In particular
// in has the format version, the build time as Unix epoch time, the database
// it has the format version, the build time as Unix epoch time, the database
// type and description, the IP version supported, and a slice of the natural
// languages included.
type Metadata struct {
@@ -74,65 +80,123 @@ func FromBytes(buffer []byte) (*Reader, error) {
buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
}

reader := &Reader{
buffer: buffer,
decoder: d,
Metadata: metadata,
ipv4Start: 0,
nodeBuffer := buffer[:searchTreeSize]
var nodeReader nodeReader
switch metadata.RecordSize {
case 24:
nodeReader = nodeReader24{buffer: nodeBuffer}
case 28:
nodeReader = nodeReader28{buffer: nodeBuffer}
case 32:
nodeReader = nodeReader32{buffer: nodeBuffer}
default:
return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize)
}

reader.ipv4Start, err = reader.startNode()
reader := &Reader{
buffer: buffer,
nodeReader: nodeReader,
decoder: d,
Metadata: metadata,
ipv4Start: 0,
nodeOffsetMult: metadata.RecordSize / 4,
}

reader.setIPv4Start()

return reader, err
}

func (r *Reader) startNode() (uint, error) {
func (r *Reader) setIPv4Start() {
if r.Metadata.IPVersion != 6 {
return 0, nil
return
}

nodeCount := r.Metadata.NodeCount

node := uint(0)
var err error
for i := 0; i < 96 && node < nodeCount; i++ {
node, err = r.readNode(node, 0)
if err != nil {
return 0, err
}
i := 0
for ; i < 96 && node < nodeCount; i++ {
node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
}
return node, err
r.ipv4Start = node
r.ipv4StartBitDepth = i
}

// Lookup takes an IP address as a net.IP structure and a pointer to the
// result value to Decode into.
func (r *Reader) Lookup(ipAddress net.IP, result interface{}) error {
// Lookup retrieves the database record for ip and stores it in the value
// pointed to by result. If result is nil or not a pointer, an error is
// returned. If the data in the database record cannot be stored in result
// because of type differences, an UnmarshalTypeError is returned. If the
// database is invalid or otherwise cannot be read, an InvalidDatabaseError
// is returned.
func (r *Reader) Lookup(ip net.IP, result interface{}) error {
if r.buffer == nil {
return errors.New("cannot call Lookup on a closed database")
}
pointer, err := r.lookupPointer(ipAddress)
pointer, _, _, err := r.lookupPointer(ip)
if pointer == 0 || err != nil {
return err
}
return r.retrieveData(pointer, result)
}

// LookupNetwork retrieves the database record for ip and stores it in the
// value pointed to by result. The network returned is the network associated
// with the data record in the database. The ok return value indicates whether
// the database contained a record for the ip.
//
// If result is nil or not a pointer, an error is returned. If the data in the
// database record cannot be stored in result because of type differences, an
// UnmarshalTypeError is returned. If the database is invalid or otherwise
// cannot be read, an InvalidDatabaseError is returned.
func (r *Reader) LookupNetwork(ip net.IP, result interface{}) (network *net.IPNet, ok bool, err error) {
if r.buffer == nil {
return nil, false, errors.New("cannot call Lookup on a closed database")
}
pointer, prefixLength, ip, err := r.lookupPointer(ip)

network = r.cidr(ip, prefixLength)
if pointer == 0 || err != nil {
return network, false, err
}

return network, true, r.retrieveData(pointer, result)
}

// LookupOffset maps an argument net.IP to a corresponding record offset in the
// database. NotFound is returned if no such record is found, and a record may
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
// is an advanced API, which exists to provide clients with a means to cache
// previously-decoded records.
func (r *Reader) LookupOffset(ipAddress net.IP) (uintptr, error) {
func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) {
if r.buffer == nil {
return 0, errors.New("cannot call LookupOffset on a closed database")
}
pointer, err := r.lookupPointer(ipAddress)
pointer, _, _, err := r.lookupPointer(ip)
if pointer == 0 || err != nil {
return NotFound, err
}
return r.resolveDataPointer(pointer)
}

func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
// This is necessary as the node that the IPv4 start is at may
// be at a bit depth that is less that 96, i.e., ipv4Start points
// to a leaf node. For instance, if a record was inserted at ::/8,
// the ipv4Start would point directly at the leaf node for the
// record and would have a bit depth of 8. This would not happen
// with databases currently distributed by MaxMind as all of them
// have an IPv4 subtree that is greater than a single node.
if r.Metadata.IPVersion == 6 &&
len(ip) == net.IPv4len &&
r.ipv4StartBitDepth != 96 {
return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
}

mask := net.CIDRMask(prefixLength, len(ip)*8)
return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
}

// Decode the record at |offset| into |result|. The result value pointed to
// must be a data value that corresponds to a record in the database. This may
// include a struct representation of the data, a map capable of holding the
@@ -166,79 +230,54 @@ func (r *Reader) decode(offset uintptr, result interface{}) error {
return err
}

func (r *Reader) lookupPointer(ipAddress net.IP) (uint, error) {
if ipAddress == nil {
return 0, errors.New("ipAddress passed to Lookup cannot be nil")
func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
if ip == nil {
return 0, 0, ip, errors.New("IP passed to Lookup cannot be nil")
}

ipV4Address := ipAddress.To4()
ipV4Address := ip.To4()
if ipV4Address != nil {
ipAddress = ipV4Address
ip = ipV4Address
}
if len(ipAddress) == 16 && r.Metadata.IPVersion == 4 {
return 0, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ipAddress.String())
if len(ip) == 16 && r.Metadata.IPVersion == 4 {
return 0, 0, ip, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ip.String())
}

return r.findAddressInTree(ipAddress)
}

func (r *Reader) findAddressInTree(ipAddress net.IP) (uint, error) {

bitCount := uint(len(ipAddress) * 8)
bitCount := uint(len(ip) * 8)

var node uint
if bitCount == 32 {
node = r.ipv4Start
}
node, prefixLength := r.traverseTree(ip, node, bitCount)

nodeCount := r.Metadata.NodeCount

for i := uint(0); i < bitCount && node < nodeCount; i++ {
bit := uint(1) & (uint(ipAddress[i>>3]) >> (7 - (i % 8)))

var err error
node, err = r.readNode(node, bit)
if err != nil {
return 0, err
}
}
if node == nodeCount {
// Record is empty
return 0, nil
return 0, prefixLength, ip, nil
} else if node > nodeCount {
return node, nil
return node, prefixLength, ip, nil
}

return 0, newInvalidDatabaseError("invalid node in search tree")
return 0, prefixLength, ip, newInvalidDatabaseError("invalid node in search tree")
}

func (r *Reader) readNode(nodeNumber uint, index uint) (uint, error) {
RecordSize := r.Metadata.RecordSize
func (r *Reader) traverseTree(ip net.IP, node uint, bitCount uint) (uint, int) {
nodeCount := r.Metadata.NodeCount

baseOffset := nodeNumber * RecordSize / 4
i := uint(0)
for ; i < bitCount && node < nodeCount; i++ {
bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))

var nodeBytes []byte
var prefix uint
switch RecordSize {
case 24:
offset := baseOffset + index*3
nodeBytes = r.buffer[offset : offset+3]
case 28:
prefix = uint(r.buffer[baseOffset+3])
if index != 0 {
prefix &= 0x0F
offset := node * r.nodeOffsetMult
if bit == 0 {
node = r.nodeReader.readLeft(offset)
} else {
prefix = (0xF0 & prefix) >> 4
node = r.nodeReader.readRight(offset)
}
offset := baseOffset + index*4
nodeBytes = r.buffer[offset : offset+3]
case 32:
offset := baseOffset + index*4
nodeBytes = r.buffer[offset : offset+4]
default:
return 0, newInvalidDatabaseError("unknown record size: %d", RecordSize)
}
return uintFromBytes(prefix, nodeBytes), nil

return node, int(i)
}

func (r *Reader) retrieveData(pointer uint, result interface{}) error {
@@ -252,7 +291,7 @@ func (r *Reader) retrieveData(pointer uint, result interface{}) error {
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
var resolved = uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)

if resolved > uintptr(len(r.buffer)) {
if resolved >= uintptr(len(r.buffer)) {
return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
}
return resolved, nil
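reader.go now exposes LookupNetwork alongside Lookup. A hedged usage sketch follows, assuming a City database file at the path shown; the record is decoded into a generic map here only to show the shape of the call.

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/oschwald/maxminddb-golang"
)

func main() {
	db, err := maxminddb.Open("GeoLite2-City.mmdb") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var record map[string]interface{}
	network, ok, err := db.LookupNetwork(net.ParseIP("81.2.69.142"), &record)
	if err != nil {
		log.Fatal(err)
	}
	// ok reports whether the database actually held a record for this IP;
	// network is the containing network derived from the search tree.
	fmt.Println(network, ok)
}
```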
vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go | 2 (generated, vendored)
@@ -1,4 +1,4 @@
// +build appengine
// +build appengine plan9

package maxminddb
vendor/github.com/oschwald/maxminddb-golang/reader_other.go | 22 (generated, vendored)
@@ -1,4 +1,4 @@
// +build !appengine
// +build !appengine,!plan9

package maxminddb

@@ -15,37 +15,37 @@ import (
func Open(file string) (*Reader, error) {
mapFile, err := os.Open(file)
if err != nil {
_ = mapFile.Close()
return nil, err
}
defer func() {
if rerr := mapFile.Close(); rerr != nil {
err = rerr
}
}()

stats, err := mapFile.Stat()
if err != nil {
_ = mapFile.Close()
return nil, err
}

fileSize := int(stats.Size())
mmap, err := mmap(int(mapFile.Fd()), fileSize)
if err != nil {
_ = mapFile.Close()
return nil, err
}

if err := mapFile.Close(); err != nil {
_ = munmap(mmap)
return nil, err
}

reader, err := FromBytes(mmap)
if err != nil {
if err2 := munmap(mmap); err2 != nil {
// failing to unmap the file is probably the more severe error
return nil, err2
}
_ = munmap(mmap)
return nil, err
}

reader.hasMappedFile = true
runtime.SetFinalizer(reader, (*Reader).Close)
return reader, err
return reader, nil
}

// Close unmaps the database file from virtual memory and returns the
vendor/github.com/oschwald/maxminddb-golang/traverse.go | 97 (generated, vendored)
@@ -1,6 +1,8 @@
package maxminddb

import "net"
import (
"net"
)

// Internal structure used to keep track of nodes we still need to visit.
type netNode struct {
@@ -17,22 +19,52 @@ type Networks struct {
err error
}

var allIPv4 = &net.IPNet{IP: make(net.IP, 4), Mask: net.CIDRMask(0, 32)}
var allIPv6 = &net.IPNet{IP: make(net.IP, 16), Mask: net.CIDRMask(0, 128)}

// Networks returns an iterator that can be used to traverse all networks in
// the database.
//
// Please note that a MaxMind DB may map IPv4 networks into several locations
// in in an IPv6 database. This iterator will iterate over all of these
// in an IPv6 database. This iterator will iterate over all of these
// locations separately.
func (r *Reader) Networks() *Networks {
s := 4
var networks *Networks
if r.Metadata.IPVersion == 6 {
s = 16
networks = r.NetworksWithin(allIPv6)
} else {
networks = r.NetworksWithin(allIPv4)
}

return networks
}

// NetworksWithin returns an iterator that can be used to traverse all networks
// in the database which are contained in a given network.
//
// Please note that a MaxMind DB may map IPv4 networks into several locations
// in an IPv6 database. This iterator will iterate over all of these locations
// separately.
//
// If the provided network is contained within a network in the database, the
// iterator will iterate over exactly one network, the containing network.
func (r *Reader) NetworksWithin(network *net.IPNet) *Networks {
ip := network.IP
prefixLength, _ := network.Mask.Size()

if r.Metadata.IPVersion == 6 && len(ip) == net.IPv4len {
ip = net.IP.To16(ip)
prefixLength += 96
}

pointer, bit := r.traverseTree(ip, 0, uint(prefixLength))
return &Networks{
reader: r,
nodes: []netNode{
{
ip: make(net.IP, s),
ip: ip,
bit: uint(bit),
pointer: pointer,
},
},
}
@@ -46,42 +78,31 @@ func (n *Networks) Next() bool {
node := n.nodes[len(n.nodes)-1]
n.nodes = n.nodes[:len(n.nodes)-1]

for {
if node.pointer < n.reader.Metadata.NodeCount {
ipRight := make(net.IP, len(node.ip))
copy(ipRight, node.ip)
if len(ipRight) <= int(node.bit>>3) {
n.err = newInvalidDatabaseError(
"invalid search tree at %v/%v", ipRight, node.bit)
return false
}
ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))

rightPointer, err := n.reader.readNode(node.pointer, 1)
if err != nil {
n.err = err
return false
}

node.bit++
n.nodes = append(n.nodes, netNode{
pointer: rightPointer,
ip: ipRight,
bit: node.bit,
})

node.pointer, err = n.reader.readNode(node.pointer, 0)
if err != nil {
n.err = err
return false
}

} else if node.pointer > n.reader.Metadata.NodeCount {
for node.pointer != n.reader.Metadata.NodeCount {
if node.pointer > n.reader.Metadata.NodeCount {
n.lastNode = node
return true
} else {
break
}
ipRight := make(net.IP, len(node.ip))
copy(ipRight, node.ip)
if len(ipRight) <= int(node.bit>>3) {
n.err = newInvalidDatabaseError(
"invalid search tree at %v/%v", ipRight, node.bit)
return false
}
ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))

offset := node.pointer * n.reader.nodeOffsetMult
rightPointer := n.reader.nodeReader.readRight(offset)

node.bit++
n.nodes = append(n.nodes, netNode{
pointer: rightPointer,
ip: ipRight,
bit: node.bit,
})

node.pointer = n.reader.nodeReader.readLeft(offset)
}
}
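traverse.go's Networks and the new NetworksWithin return an iterator driven by Next. A sketch of how it is typically consumed follows; the Network and Err methods belong to the package's public iterator API but are not shown in this hunk, and the database path and CIDR are assumptions.

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/oschwald/maxminddb-golang"
)

func main() {
	db, err := maxminddb.Open("GeoLite2-Country.mmdb") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_, subnet, err := net.ParseCIDR("81.2.69.0/24")
	if err != nil {
		log.Fatal(err)
	}

	// db.Networks() would walk the whole tree; NetworksWithin limits the walk.
	networks := db.NetworksWithin(subnet)
	for networks.Next() {
		var record map[string]interface{}
		network, err := networks.Network(&record)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(network)
	}
	if err := networks.Err(); err != nil {
		log.Fatal(err)
	}
}
```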