diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 06bea8c..93ebf83 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,10 +19,10 @@ jobs: steps: - - name: set up go 1.18 + - name: set up go 1.19 uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 id: go - name: checkout @@ -40,7 +40,7 @@ jobs: - name: install golangci-lint and goveralls run: | - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $GITHUB_WORKSPACE v1.46.2 + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $GITHUB_WORKSPACE v1.50.1 GO111MODULE=off go get -u github.com/mattn/goveralls - name: run linters diff --git a/.golangci.yml b/.golangci.yml index 48008c7..8f6d2e1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -42,11 +42,9 @@ linters: - gocyclo - dupl - misspell - - varcheck - - deadcode + - unused - typecheck - ineffassign - - varcheck - stylecheck - gochecknoinits - exportloopref @@ -59,6 +57,9 @@ linters: issues: exclude-rules: + - text: "package-comments: should have a package comment" + linters: + - revive - text: "at least one file in a package should have a package comment" linters: - stylecheck diff --git a/README.md b/README.md index 518ae40..ac2bfe5 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,7 @@ The idea of external services is to be able to integrate status of all related s ``` Application Options: + -f, --config= config file [$CONFIG] -l, --listen= listen on host:port (default: localhost:8080) [$LISTEN] -v, --volume= volumes to report (default: root:/) [$VOLUMES] -s, --service= services to report [$SERVICES] @@ -36,6 +37,40 @@ Help Options: * services (`--service`, can be repeated) is a list of name:url pairs, where name is a name of the service, and url is a url to the service. Supports `http`, `https`, `mongodb` and `docker` schemes. The response for each service will be in `services` field. * concurrency (`--concurrency`) is a number of concurrent requests to services. * timeout (`--timeout`) is a timeout for each request to services. +* config file (`--config`, `-f`) is a path to the config file, see below for details. + +## configuration file + +`sys-agent` can be configured with a YAML file as well. The file should contain a list of volumes and services. The file can be specified via the `--config` (`-f`) option or the `CONFIG` environment variable. + +```yml +volumes: + - {name: root, path: /hostroot} + - {name: data, path: /data} + +services: + mongo: + - {name: dev, url: mongodb://example.com:27017, oplog_max_delta: 30m} + certificate: + - {name: prim_cert, url: https://example1.com} + - {name: second_cert, url: https://example2.com} + docker: + - {name: docker1, url: unix:///var/run/docker.sock, containers: [reproxy, mattermost, postgres]} + - {name: docker2, url: tcp://192.168.1.1:4080} + file: + - {name: first, path: /tmp/example1.txt} + - {name: second, path: /tmp/example2.txt} + http: + - {name: first, url: https://example1.com} + - {name: second, url: https://example2.com} + program: + - {name: first, path: /usr/bin/example1, args: [arg1, arg2]} + - {name: second, path: /usr/bin/example2} + nginx: + - {name: nginx, status_url: http://example.com:80} +``` + +The config file has the same structure as the command line options. `sys-agent` converts the config file to command line options and then parses them as usual.
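+For example, with the file above the `mongo` and the first `docker` entries are converted to the following service definitions, exactly as if they had been passed via `--service`:
+
+```
+dev:mongodb://example.com:27017?oplogMaxDelta=30m0s
+docker1:docker:///var/run/docker.sock?containers=reproxy,mattermost,postgres
+```
+
+Note that services from the config file are merged with any `--service` options, while `--volume` options, when given, replace the config file volumes rather than merging with them.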
## basic checks diff --git a/app/config/config.go b/app/config/config.go new file mode 100644 index 0000000..06b3ae1 --- /dev/null +++ b/app/config/config.go @@ -0,0 +1,160 @@ +// Package config provides the configuration for the application. +package config + +import ( + "fmt" + "os" + "strings" + "time" + + "gopkg.in/yaml.v3" +) + +// Parameters represents the full set of configuration parameters +type Parameters struct { + Volumes []Volume `yaml:"volumes"` + Services struct { + HTTP []HTTP `yaml:"http"` + Certificate []Certificate `yaml:"certificate"` + File []File `yaml:"file"` + Mongo []Mongo `yaml:"mongo"` + Nginx []Nginx `yaml:"nginx"` + Program []Program `yaml:"program"` + Docker []Docker `yaml:"docker"` + } `yaml:"services"` + + fileName string `yaml:"-"` +} + +// Volume represents a volume to check +type Volume struct { + Name string `yaml:"name"` + Path string `yaml:"path"` +} + +// HTTP represents an HTTP service to check +type HTTP struct { + Name string `yaml:"name"` + URL string `yaml:"url"` +} + +// Certificate represents a certificate to check +type Certificate struct { + Name string `yaml:"name"` + URL string `yaml:"url"` +} + +// Docker represents a docker container to check +type Docker struct { + Name string `yaml:"name"` + URL string `yaml:"url"` + Containers []string `yaml:"containers"` // required containers +} + +// File represents a file to check +type File struct { + Name string `yaml:"name"` + Path string `yaml:"path"` +} + +// Mongo represents a mongo service to check +type Mongo struct { + Name string `yaml:"name"` + URL string `yaml:"url"` + OplogMaxDelta time.Duration `yaml:"oplog_max_delta"` +} + +// Nginx represents a nginx service to check +type Nginx struct { + Name string `yaml:"name"` + StatusURL string `yaml:"status_url"` +} + +// Program represents a program to check +type Program struct { + Name string `yaml:"name"` + Path string `yaml:"path"` + Args []string `yaml:"args"` +} + +// New creates a new Parameters from the given file +func New(fname string) (*Parameters, error) { + p := &Parameters{fileName: fname} + data, err := os.ReadFile(fname) //nolint:gosec + if err != nil { + return nil, fmt.Errorf("can't read config %s: %w", fname, err) + } + if err = yaml.Unmarshal(data, &p); err != nil { + return nil, fmt.Errorf("failed to parse config %s: %w", fname, err) + } + return p, nil } + +// MarshalVolumes returns the volumes as a list of strings with the format "name:path" +func (p *Parameters) MarshalVolumes() []string { + res := make([]string, 0, len(p.Volumes)) + for _, v := range p.Volumes { + res = append(res, fmt.Sprintf("%s:%s", v.Name, v.Path)) + } + return res +} + +// MarshalServices returns the services as a list of strings with the format used by the command line +func (p *Parameters) MarshalServices() []string { + res := []string{} + + for _, v := range p.Services.HTTP { + res = append(res, fmt.Sprintf("%s:%s", v.Name, v.URL)) + } + + for _, v := range p.Services.Certificate { + url := strings.TrimPrefix(v.URL, "https://") + url = strings.TrimPrefix(url, "http://") + res = append(res, fmt.Sprintf("%s:cert://%s", v.Name, url)) + } + + for _, v := range p.Services.Docker { + url := strings.TrimPrefix(v.URL, "https://") + url = strings.TrimPrefix(url, "http://") + url = strings.TrimPrefix(url, "tcp://") + url = strings.TrimPrefix(url, "unix://") + if len(v.Containers) > 0 { + url += "?containers=" + strings.Join(v.Containers, ",") + } + res = append(res, fmt.Sprintf("%s:docker://%s", v.Name, url)) + } + + for _, v := range p.Services.File { + res
= append(res, fmt.Sprintf("%s:file://%s", v.Name, v.Path)) + } + + for _, v := range p.Services.Mongo { + m := fmt.Sprintf("%s:%s", v.Name, v.URL) + if v.OplogMaxDelta > 0 { + if strings.Contains(m, "?") { + m += fmt.Sprintf("&oplogMaxDelta=%v", v.OplogMaxDelta) + } else { + m += fmt.Sprintf("?oplogMaxDelta=%v", v.OplogMaxDelta) + } + } + res = append(res, m) + } + + for _, v := range p.Services.Nginx { + res = append(res, fmt.Sprintf("%s:nginx:%s", v.Name, v.StatusURL)) + } + + for _, v := range p.Services.Program { + prg := fmt.Sprintf("%s:program://%s", v.Name, v.Path) + if len(v.Args) > 0 { + prg += "?args=\"" + strings.Join(v.Args, " ") + "\"" + } + res = append(res, prg) + } + + return res +} + +func (p *Parameters) String() string { + return fmt.Sprintf("config file: %q, %+v", p.fileName, *p) +} diff --git a/app/config/config_test.go b/app/config/config_test.go new file mode 100644 index 0000000..c12a7f5 --- /dev/null +++ b/app/config/config_test.go @@ -0,0 +1,81 @@ +package config + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + { + _, err := New("testdata/invalid.yml") + require.Error(t, err) + assert.EqualErrorf(t, err, "can't read config testdata/invalid.yml: open testdata/invalid.yml: no such file or directory", "expected error") + } + + { + p, err := New("testdata/config.yml") + require.NoError(t, err) + assert.Equal(t, []Volume{{Name: "root", Path: "/hostroot"}, {Name: "data", Path: "/data"}}, p.Volumes) + assert.Equal(t, []Certificate{{Name: "prim_cert", URL: "https://example1.com"}, + {Name: "second_cert", URL: "https://example2.com"}}, p.Services.Certificate) + assert.Equal(t, []Docker{ + {Name: "docker1", URL: "unix:///var/run/docker.sock", Containers: []string{"reproxy", "mattermost", "postgres"}}, + {Name: "docker2", URL: "tcp://192.168.1.1:4080", Containers: []string(nil)}}, p.Services.Docker) + assert.Equal(t, []File{{Name: "first", Path: "/tmp/example1.txt"}, {Name: "second", Path: "/tmp/example2.txt"}}, + p.Services.File) + assert.Equal(t, []HTTP{{Name: "first", URL: "https://example1.com"}, {Name: "second", URL: "https://example2.com"}}, + p.Services.HTTP) + assert.Equal(t, []Mongo{{Name: "dev", URL: "mongodb://example.com:27017", OplogMaxDelta: 30 * time.Minute}}, + p.Services.Mongo) + assert.Equal(t, []Nginx{{Name: "nginx", StatusURL: "http://example.com:80"}}, p.Services.Nginx) + } +} + +func TestParameters_MarshalVolumes(t *testing.T) { + p, err := New("testdata/config.yml") + require.NoError(t, err) + assert.Equal(t, []string{"root:/hostroot", "data:/data"}, p.MarshalVolumes()) +} + +func TestParameters_String(t *testing.T) { + p, err := New("testdata/config.yml") + require.NoError(t, err) + exp := `config file: "testdata/config.yml", {Volumes:[{Name:root Path:/hostroot} {Name:data Path:/data}] Services:{HTTP:[{Name:first URL:https://example1.com} {Name:second URL:https://example2.com}] Certificate:[{Name:prim_cert URL:https://example1.com} {Name:second_cert URL:https://example2.com}] File:[{Name:first Path:/tmp/example1.txt} {Name:second Path:/tmp/example2.txt}] Mongo:[{Name:dev URL:mongodb://example.com:27017 OplogMaxDelta:30m0s}] Nginx:[{Name:nginx StatusURL:http://example.com:80}] Program:[{Name:first Path:/usr/bin/example1 Args:[arg1 arg2]} {Name:second Path:/usr/bin/example2 Args:[]}] Docker:[{Name:docker1 URL:unix:///var/run/docker.sock Containers:[reproxy mattermost postgres]} {Name:docker2 URL:tcp://192.168.1.1:4080 Containers:[]}]} 
fileName:testdata/config.yml}` + assert.Equal(t, exp, p.String()) +} + +func TestParameters_MarshalServices(t *testing.T) { + { + p, err := New("testdata/config.yml") + require.NoError(t, err) + exp := []string{ + "first:https://example1.com", "second:https://example2.com", + "prim_cert:cert://example1.com", "second_cert:cert://example2.com", + "docker1:docker:///var/run/docker.sock?containers=reproxy,mattermost,postgres", "docker2:docker://192.168.1.1:4080", + "first:file:///tmp/example1.txt", "second:file:///tmp/example2.txt", + "dev:mongodb://example.com:27017?oplogMaxDelta=30m0s", + "nginx:nginx:http://example.com:80", + "first:program:///usr/bin/example1?args=\"arg1 arg2\"", "second:program:///usr/bin/example2", + } + assert.Equal(t, exp, p.MarshalServices()) + } + + { // test mongo with query params + p, err := New("testdata/config.yml") + require.NoError(t, err) + p.Services.Mongo[0].URL = "mongodb://example.com:27017/admin?foo=bar&blah=blah" + exp := "dev:mongodb://example.com:27017/admin?foo=bar&blah=blah&oplogMaxDelta=30m0s" + res := p.MarshalServices() + found := false + for _, r := range res { + if r == exp { + found = true + break + } + } + assert.True(t, found, "expected %s in %v", exp, res) + } +} diff --git a/app/config/testdata/config.yml b/app/config/testdata/config.yml new file mode 100644 index 0000000..e051d2c --- /dev/null +++ b/app/config/testdata/config.yml @@ -0,0 +1,24 @@ +volumes: + - {name: root, path: /hostroot} + - {name: data, path: /data} + +services: + mongo: + - {name: dev, url: mongodb://example.com:27017, oplog_max_delta: 30m} + certificate: + - {name: prim_cert, url: https://example1.com} + - {name: second_cert, url: https://example2.com} + docker: + - {name: docker1, url: unix:///var/run/docker.sock, containers: [reproxy, mattermost, postgres]} + - {name: docker2, url: tcp://192.168.1.1:4080} + file: + - {name: first, path: /tmp/example1.txt} + - {name: second, path: /tmp/example2.txt} + http: + - {name: first, url: https://example1.com} + - {name: second, url: https://example2.com} + program: + - {name: first, path: /usr/bin/example1, args: [arg1, arg2]} + - {name: second, path: /usr/bin/example2} + nginx: + - {name: nginx, status_url: http://example.com:80} \ No newline at end of file diff --git a/app/main.go b/app/main.go index 1d101e7..65f9456 100644 --- a/app/main.go +++ b/app/main.go @@ -15,6 +15,7 @@ import ( "github.com/go-pkgz/lgr" "github.com/umputun/go-flags" + "github.com/umputun/sys-agent/app/config" "github.com/umputun/sys-agent/app/server" "github.com/umputun/sys-agent/app/status" "github.com/umputun/sys-agent/app/status/external" @@ -23,14 +24,16 @@ import ( var revision string var opts struct { + Config string `short:"f" long:"config" env:"CONFIG" description:"config file"` + Listen string `short:"l" long:"listen" env:"LISTEN" default:"localhost:8080" description:"listen on host:port"` Volumes []string `short:"v" long:"volume" env:"VOLUMES" default:"root:/" env-delim:"," description:"volumes to report"` - Services []string `short:"s" long:"service" env:"SERVICES" env-delim:"," description:"services to report"` - Concurrency int `long:"concurrency" env:"CONCURRENCY" default:"4" description:"number of concurrent requests to services"` - TimeOut time.Duration `long:"timeout" env:"TIMEOUT" default:"5s" description:"timeout for each request to services"` + Services []string `short:"s" long:"service" env:"SERVICES" env-delim:"," description:"services to report"` + TimeOut time.Duration `long:"timeout" env:"TIMEOUT" default:"5s" 
description:"timeout for each request to services"` - Dbg bool `long:"dbg" env:"DEBUG" description:"show debug info"` + Concurrency int `long:"concurrency" env:"CONCURRENCY" default:"4" description:"number of concurrent requests to services"` + Dbg bool `long:"dbg" env:"DEBUG" description:"show debug info"` } func main() { @@ -62,7 +65,17 @@ func main() { cancel() }() - vols, err := parseVolumes(opts.Volumes) + var conf *config.Parameters + if opts.Config != "" { + var err error + conf, err = config.New(opts.Config) + if err != nil { + log.Fatalf("[ERROR] can't load config, %s", err) + } + log.Printf("[DEBUG] %s", conf.String()) + } + + vols, err := parseVolumes(opts.Volumes, conf) if err != nil { log.Fatalf("[ERROR] %s", err) } @@ -82,7 +95,7 @@ func main() { Version: revision, Status: &status.Service{ Volumes: vols, - ExtServices: external.NewService(providers, opts.Concurrency, opts.Services...), + ExtServices: external.NewService(providers, opts.Concurrency, services(opts.Services, conf)...), }, } @@ -91,16 +104,42 @@ func main() { } } +// service returns list of services to check, merge config and command line +func services(optsSvcs []string, conf *config.Parameters) (res []string) { + if len(optsSvcs) > 0 { + res = optsSvcs + } + if conf != nil { + res = append(res, conf.MarshalServices()...) + } + log.Printf("[DEBUG] services: %+v", res) + return res +} + // parseVolumes parses volumes from string list, each element in format "name:path" -func parseVolumes(volumes []string) ([]status.Volume, error) { - res := make([]status.Volume, len(volumes)) - for i, v := range volumes { - parts := strings.SplitN(v, ":", 2) - if len(parts) != 2 { - return nil, errors.New("invalid volume format, should be :") +// picks volumes from config if present and overrides with command line +func parseVolumes(volumes []string, conf *config.Parameters) ([]status.Volume, error) { + res := []status.Volume{} + + // load from config if present and volumes provided + if conf != nil && len(conf.Volumes) > 0 { + for _, v := range conf.Volumes { + res = append(res, status.Volume{Name: v.Name, Path: v.Path}) + } + } + + // load from command line even if config present + if len(volumes) > 0 { + res = []status.Volume{} // reset volumes from config (if filled), don't merge + for _, v := range volumes { + parts := strings.SplitN(v, ":", 2) + if len(parts) != 2 { + return nil, errors.New("invalid volume format, should be :") + } + res = append(res, status.Volume{Name: parts[0], Path: parts[1]}) } - res[i] = status.Volume{Name: parts[0], Path: parts[1]} } + log.Printf("[DEBUG] volumes: %+v", res) return res, nil } diff --git a/app/main_test.go b/app/main_test.go index ab7a04a..d2c8b43 100644 --- a/app/main_test.go +++ b/app/main_test.go @@ -15,10 +15,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/umputun/sys-agent/app/config" "github.com/umputun/sys-agent/app/status" ) -func Test_parseVolumes(t *testing.T) { +func Test_parseVolumes_ArgsOnly(t *testing.T) { tbl := []struct { inp []string @@ -33,7 +34,7 @@ func Test_parseVolumes(t *testing.T) { for i, tt := range tbl { t.Run(strconv.Itoa(i), func(t *testing.T) { - vols, err := parseVolumes(tt.inp) + vols, err := parseVolumes(tt.inp, nil) if tt.err != nil { require.EqualError(t, err, tt.err.Error()) return @@ -43,6 +44,22 @@ func Test_parseVolumes(t *testing.T) { } } +func Test_parseVolumes_ConfigOnly(t *testing.T) { + conf, err := config.New("config/testdata/config.yml") + require.NoError(t, err) + vols, err := 
parseVolumes(nil, conf) + require.NoError(t, err) + assert.Equal(t, []status.Volume{{Name: "root", Path: "/hostroot"}, {Name: "data", Path: "/data"}}, vols) +} + +func Test_parseVolumes_ArgsAndConfig(t *testing.T) { + conf, err := config.New("config/testdata/config.yml") + require.NoError(t, err) + vols, err := parseVolumes([]string{"data volume:/data", "blah:/"}, conf) + require.NoError(t, err) + assert.Equal(t, []status.Volume{{Name: "data volume", Path: "/data"}, {Name: "blah", Path: "/"}}, vols) +} + func Test_main(t *testing.T) { port := 40000 + int(rand.Int31n(1000)) os.Args = []string{"app", "--listen=127.0.0.1:" + strconv.Itoa(port), "-v root:/", "-s echo:https://echo.umputun.com", "--dbg"} @@ -98,3 +115,39 @@ func waitForHTTPServerStart(port int) { } } } + +func Test_services(t *testing.T) { + tbl := []struct { + cli []string + cfg func() *config.Parameters + res []string + }{ + {[]string{"echo:https://echo.umputun.com"}, func() *config.Parameters { return nil }, + []string{"echo:https://echo.umputun.com"}}, + + { + []string{"s1", "s2", "s3"}, func() *config.Parameters { + res := &config.Parameters{} + res.Services.HTTP = []config.HTTP{{Name: "n1", URL: "http://example.com"}} + return res + }, + []string{"s1", "s2", "s3", "n1:http://example.com"}, + }, + + { + []string{}, func() *config.Parameters { + res := &config.Parameters{} + res.Services.HTTP = []config.HTTP{{Name: "n1", URL: "http://example.com"}} + return res + }, + []string{"n1:http://example.com"}, + }, + } + + for i, tt := range tbl { + t.Run(strconv.Itoa(i), func(t *testing.T) { + res := services(tt.cli, tt.cfg()) + assert.Equal(t, tt.res, res) + }) + } +} diff --git a/app/server/server.go b/app/server/server.go index 508daa7..10c5179 100644 --- a/app/server/server.go +++ b/app/server/server.go @@ -5,7 +5,7 @@ import ( "net/http" "time" - "github.com/didip/tollbooth/v6" + "github.com/didip/tollbooth/v7" "github.com/didip/tollbooth_chi" "github.com/go-chi/chi/v5" log "github.com/go-pkgz/lgr" diff --git a/app/status/external/mongo_provider.go b/app/status/external/mongo_provider.go index 72df224..954c2f8 100644 --- a/app/status/external/mongo_provider.go +++ b/app/status/external/mongo_provider.go @@ -22,6 +22,8 @@ type MongoProvider struct { } // Status returns status of mongo, checks if connection established and ping is ok +// request URL looks like mongo:mongodb://172.17.42.1:27017/test?oplogMaxDelta=30m +// oplogMaxDelta is optional, if set, checks if oplog is not too far behind func (m *MongoProvider) Status(req Request) (*Response, error) { st := time.Now() ctx, cancel := context.WithTimeout(context.Background(), m.TimeOut) diff --git a/config-example.yml b/config-example.yml new file mode 100644 index 0000000..02c25c2 --- /dev/null +++ b/config-example.yml @@ -0,0 +1,11 @@ +volumes: + - {name: root, path: /hostroot} + - {name: data, path: /data} + +services: + certificate: + - {name: rt_cert, url: https://radio-t.com} + - {name: ump_cert, url: https://umputun.com} + http: + - {name: news_ping, url: https://news.radio-t.com/api/ping} + - {name: site_ping, url: https://radio-t.com/site-api/ping} diff --git a/go.mod b/go.mod index 341e42a..73d8963 100644 --- a/go.mod +++ b/go.mod @@ -1,44 +1,43 @@ module github.com/umputun/sys-agent -go 1.18 +go 1.19 require ( - github.com/didip/tollbooth/v6 v6.1.1 - github.com/didip/tollbooth_chi v0.0.0-20200828173446-a7173453ea21 - github.com/go-chi/chi/v5 v5.0.7 + github.com/didip/tollbooth/v7 v7.0.1 + github.com/didip/tollbooth_chi v0.0.0-20220719025231-d662a7f6928f + 
github.com/go-chi/chi/v5 v5.0.8 github.com/go-pkgz/fileutils v0.1.0 github.com/go-pkgz/lgr v0.10.4 github.com/go-pkgz/mongo/v2 v2.0.0 - github.com/go-pkgz/rest v1.12.2 + github.com/go-pkgz/rest v1.17.0 github.com/go-pkgz/syncs v1.2.0 - github.com/shirou/gopsutil/v3 v3.22.1 - github.com/stretchr/testify v1.7.0 + github.com/shirou/gopsutil/v3 v3.22.12 + github.com/stretchr/testify v1.8.1 github.com/umputun/go-flags v1.5.1 - go.mongodb.org/mongo-driver v1.8.3 + go.mongodb.org/mongo-driver v1.11.1 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-pkgz/expirable-cache v0.0.3 // indirect - github.com/go-stack/stack v1.8.1 // indirect + github.com/go-pkgz/expirable-cache v1.0.0 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/klauspost/compress v1.14.2 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/klauspost/compress v1.15.15 // indirect + github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de // indirect + github.com/montanaflynn/stats v0.7.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/tklauser/go-sysconf v0.3.9 // indirect - github.com/tklauser/numcpus v0.4.0 // indirect + github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/scram v1.1.0 // indirect - github.com/xdg-go/stringprep v1.0.2 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + golang.org/x/crypto v0.5.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 0eef163..4cecbcc 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,18 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/didip/tollbooth/v6 v6.0.1/go.mod h1:j2pKs+JQ5PvU/K4jFnrnwntrmfUbYLJE5oSdxR37FD0= -github.com/didip/tollbooth/v6 v6.1.1 h1:Nt7PvWLa9Y94OrykXsFNBinVRQIu8xdy4avpl99Dc1M= -github.com/didip/tollbooth/v6 v6.1.1/go.mod h1:xjcse6CTHCLuOkzsWrEgdy9WPJFv+p/x6v+MyfP+O9s= -github.com/didip/tollbooth_chi v0.0.0-20200828173446-a7173453ea21 h1:x7YpwKSBIBcKe9I3aTNOqgSyJ6QKDdtOxnEkxBTsi9w= -github.com/didip/tollbooth_chi v0.0.0-20200828173446-a7173453ea21/go.mod h1:0ZVa6kSzS011nfTC1rELyxK4tjVf6vqBnOv7oY2KlsA= -github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8= -github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= 
+github.com/didip/tollbooth/v7 v7.0.0/go.mod h1:VZhDSGl5bDSPj4wPsih3PFa4Uh9Ghv8hgacaTm5PRT4= +github.com/didip/tollbooth/v7 v7.0.1 h1:TkT4sBKoQoHQFPf7blQ54iHrZiTDnr8TceU+MulVAog= +github.com/didip/tollbooth/v7 v7.0.1/go.mod h1:VZhDSGl5bDSPj4wPsih3PFa4Uh9Ghv8hgacaTm5PRT4= +github.com/didip/tollbooth_chi v0.0.0-20220719025231-d662a7f6928f h1:jtKwihcLmUC9BAhoJ9adCUqdSSZcOdH2KL7mPTUm2aw= +github.com/didip/tollbooth_chi v0.0.0-20220719025231-d662a7f6928f/go.mod h1:q9C80dnsuVRP2dAskjnXRNWdUJqtGgwG9wNrzt0019s= +github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= +github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-pkgz/expirable-cache v0.0.3 h1:rTh6qNPp78z0bQE6HDhXBHUwqnV9i09Vm6dksJLXQDc= -github.com/go-pkgz/expirable-cache v0.0.3/go.mod h1:+IauqN00R2FqNRLCLA+X5YljQJrwB179PfiAoMPlTlQ= +github.com/go-pkgz/expirable-cache v0.1.0/go.mod h1:GTrEl0X+q0mPNqN6dtcQXksACnzCBQ5k/k1SwXJsZKs= +github.com/go-pkgz/expirable-cache v1.0.0 h1:ns5+1hjY8hntGv8bPaQd9Gr7Jyo+Uw5SLyII40aQdtA= +github.com/go-pkgz/expirable-cache v1.0.0/go.mod h1:GTrEl0X+q0mPNqN6dtcQXksACnzCBQ5k/k1SwXJsZKs= github.com/go-pkgz/fileutils v0.1.0 h1:tfyQWSwe+4AXeXk5Lq/Uq7TLkAdY4PxV/xGD+kqLUi0= github.com/go-pkgz/fileutils v0.1.0/go.mod h1:h1jlQoXxZxZrVjVg3Vxdbufs9qQ+jtj2WylTy3y9L4k= github.com/go-pkgz/lgr v0.2.2/go.mod h1:hBM1NM/SoYdlrykgdgJWGrZ/TM/XaZIjRbJfx7NkMm8= @@ -19,111 +20,130 @@ github.com/go-pkgz/lgr v0.10.4 h1:l7qyFjqEZgwRgaQQSEp6tve4A3OU80VrfzpvtEX8ngw= github.com/go-pkgz/lgr v0.10.4/go.mod h1:CD0s1z6EFpIUplV067gitF77tn25JItzwHNKAPqeCF0= github.com/go-pkgz/mongo/v2 v2.0.0 h1:X5ZXAqsgVRU434/q20aShdGtR0eeNb+JPwaybc+L5EE= github.com/go-pkgz/mongo/v2 v2.0.0/go.mod h1:R87FUhLqA9e4o+Pgwh83bfnLkzHn7rJvTFkJBmO+FCQ= -github.com/go-pkgz/rest v1.12.2 h1:AQ7uuBmMcufbiUBc4IS8aqDNMvRJlhtRpXLSNRitjVo= -github.com/go-pkgz/rest v1.12.2/go.mod h1:KUWAqbDteYGS/CiXftomQsKjtEOifXsJ36Ka0skYbmk= +github.com/go-pkgz/rest v1.17.0 h1:LoBI/lDBMuqwWhOOkc6thM9NnwJO+K9nWvCOjZ7BAgE= +github.com/go-pkgz/rest v1.17.0/go.mod h1:HHlLOt02NJc2sgffXBF6hYVMcRo4Gz3vjg43zTzN7VM= github.com/go-pkgz/syncs v1.2.0 h1:aiizQFILlMZ4KtRNaYLcDffRbUQZH9fclsgr5KybWyY= github.com/go-pkgz/syncs v1.2.0/go.mod h1:fjThZdM2FkC/oSeiqBTOZOtHpbrCh4HuHbipB5qZJJM= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= -github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de h1:V53FWzU6KAZVi1tPp5UIsMoUWJ2/PNwYIDXnu7QuBCE= +github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/shirou/gopsutil/v3 v3.22.1 h1:33y31Q8J32+KstqPfscvFwBlNJ6xLaBy4xqBXzlYV5w= -github.com/shirou/gopsutil/v3 v3.22.1/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= +github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/shirou/gopsutil/v3 v3.22.12 h1:oG0ns6poeUSxf78JtOsfygNWuEHYYz8hnnNg7P04TJs= +github.com/shirou/gopsutil/v3 v3.22.12/go.mod h1:Xd7P1kwZcp5VW52+9XsirIKd/BROzbb2wdX3Kqlz9uI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= -github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/umputun/go-flags v1.5.1 h1:vRauoXV3Ultt1HrxivSxowbintgZLJE+EcBy5ta3/mY= github.com/umputun/go-flags v1.5.1/go.mod h1:nTbvsO/hKqe7Utri/NoyN18GR3+EWf+9RrmsdwdhrEc= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/scram v1.1.0 h1:d70R37I0HrDLsafRrMBXyrD4lmQbCHE873t00Vr0gm0= -github.com/xdg-go/scram v1.1.0/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.mongodb.org/mongo-driver 
v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.8.3 h1:TDKlTkGDKm9kkJVUOAXDK5/fkqKHJVwYQSpoRfB43R4= -go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mongodb.org/mongo-driver v1.11.1 h1:QP0znIRTuL0jf1oBQoAoM0C6ZJfBK4kx0Uumtv1A7w8= +go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 h1:71vQrMauZZhcTVK6KdYM+rklehEEwb3E+ZhaE5jrPrE= -golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 
h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/didip/tollbooth/v6/libstring/libstring.go b/vendor/github.com/didip/tollbooth/v6/libstring/libstring.go deleted file mode 100644 index fef29af..0000000 --- a/vendor/github.com/didip/tollbooth/v6/libstring/libstring.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package libstring provides various string related functions. -package libstring - -import ( - "net" - "net/http" - "strings" -) - -// StringInSlice finds needle in a slice of strings. -func StringInSlice(sliceString []string, needle string) bool { - for _, b := range sliceString { - if b == needle { - return true - } - } - return false -} - -// RemoteIP finds IP Address given http.Request struct. -func RemoteIP(ipLookups []string, forwardedForIndexFromBehind int, r *http.Request) string { - realIP := r.Header.Get("X-Real-IP") - forwardedFor := r.Header.Get("X-Forwarded-For") - - for _, lookup := range ipLookups { - if lookup == "RemoteAddr" { - // 1. Cover the basic use cases for both ipv4 and ipv6 - ip, _, err := net.SplitHostPort(r.RemoteAddr) - if err != nil { - // 2. Upon error, just return the remote addr. 
- return r.RemoteAddr - } - return ip - } - if lookup == "X-Forwarded-For" && forwardedFor != "" { - // X-Forwarded-For is potentially a list of addresses separated with "," - parts := strings.Split(forwardedFor, ",") - for i, p := range parts { - parts[i] = strings.TrimSpace(p) - } - - partIndex := len(parts) - 1 - forwardedForIndexFromBehind - if partIndex < 0 { - partIndex = 0 - } - - return parts[partIndex] - } - if lookup == "X-Real-IP" && realIP != "" { - return realIP - } - } - - return "" -} diff --git a/vendor/github.com/didip/tollbooth/v6/.gitignore b/vendor/github.com/didip/tollbooth/v7/.gitignore similarity index 72% rename from vendor/github.com/didip/tollbooth/v6/.gitignore rename to vendor/github.com/didip/tollbooth/v7/.gitignore index 1043f53..91ea4d7 100644 --- a/vendor/github.com/didip/tollbooth/v6/.gitignore +++ b/vendor/github.com/didip/tollbooth/v7/.gitignore @@ -1,2 +1,3 @@ /debug /.vscode +/.idea \ No newline at end of file diff --git a/vendor/github.com/didip/tollbooth/v6/.golangci.yml b/vendor/github.com/didip/tollbooth/v7/.golangci.yml similarity index 94% rename from vendor/github.com/didip/tollbooth/v6/.golangci.yml rename to vendor/github.com/didip/tollbooth/v7/.golangci.yml index 98b2ab1..880786a 100644 --- a/vendor/github.com/didip/tollbooth/v6/.golangci.yml +++ b/vendor/github.com/didip/tollbooth/v7/.golangci.yml @@ -1,7 +1,7 @@ linters: enable: - megacheck - - golint + - revive - govet - unconvert - megacheck @@ -18,7 +18,7 @@ linters: - varcheck - stylecheck - gochecknoinits - - scopelint + - exportloopref - gocritic - nakedret - gosimple diff --git a/vendor/github.com/didip/tollbooth/v6/LICENSE b/vendor/github.com/didip/tollbooth/v7/LICENSE similarity index 100% rename from vendor/github.com/didip/tollbooth/v6/LICENSE rename to vendor/github.com/didip/tollbooth/v7/LICENSE diff --git a/vendor/github.com/didip/tollbooth/v6/README.md b/vendor/github.com/didip/tollbooth/v7/README.md similarity index 91% rename from vendor/github.com/didip/tollbooth/v6/README.md rename to vendor/github.com/didip/tollbooth/v7/README.md index 302704a..8a45bd8 100644 --- a/vendor/github.com/didip/tollbooth/v6/README.md +++ b/vendor/github.com/didip/tollbooth/v7/README.md @@ -9,7 +9,6 @@ This is a generic middleware to rate-limit HTTP requests. **NOTE 2:** Major version changes are backward-incompatible. `v2.0.0` streamlines the ugliness of the old API. - ## Versions **v1.0.0:** This version maintains the old API but all the thirdparty modules are moved to their own repo. @@ -24,14 +23,17 @@ This is a generic middleware to rate-limit HTTP requests. **v6.x.x:** Replaced `go-cache` with `github.com/go-pkgz/expirable-cache` because `go-cache` leaks goroutines. +**v7.x.x:** Replaced `time/rate` with `embedded time/rate` so that we can support more rate limit headers. ## Five Minute Tutorial + ```go package main import ( - "github.com/didip/tollbooth" "net/http" + + "github.com/didip/tollbooth/v7" ) func HelloHandler(w http.ResponseWriter, req *http.Request) { @@ -51,7 +53,9 @@ func main() { ```go import ( "time" - "github.com/didip/tollbooth/limiter" + + "github.com/didip/tollbooth/v7" + "github.com/didip/tollbooth/v7/limiter" ) lmt := tollbooth.NewLimiter(1, nil) @@ -120,6 +124,13 @@ func main() { * `X-Rate-Limit-Request-Remote-Addr` The rejected request `RemoteAddr`. + Upon both success and rejection [RateLimit](https://datatracker.ietf.org/doc/html/draft-ietf-httpapi-ratelimit-headers) headers are sent: + + * `RateLimit-Limit` The maximum request limit within the time window (1s). 
+ + * `RateLimit-Reset` The rate-limiter time window duration in seconds (always 1s). + + * `RateLimit-Remaining` The remaining tokens. 5. Customize your own message or function when limit is reached. @@ -138,7 +149,6 @@ func main() { 6. Tollbooth does not require external storage since it uses an algorithm called [Token Bucket](http://en.wikipedia.org/wiki/Token_bucket) [(Go library: golang.org/x/time/rate)](https://godoc.org/golang.org/x/time/rate). - ## Other Web Frameworks Sometimes, other frameworks require a little bit of shim to use Tollbooth. These shims below are contributed by the community, so I make no promises on how well they work. The one I am familiar with are: Chi, Gin, and Negroni. @@ -159,7 +169,6 @@ Sometimes, other frameworks require a little bit of shim to use Tollbooth. These * [Negroni](https://github.com/didip/tollbooth_negroni) - ## My other Go libraries * [Stopwatch](https://github.com/didip/stopwatch): A small library to measure latency of things. Useful if you want to report latency data to Graphite. diff --git a/vendor/github.com/didip/tollbooth/v6/errors/errors.go b/vendor/github.com/didip/tollbooth/v7/errors/errors.go similarity index 100% rename from vendor/github.com/didip/tollbooth/v6/errors/errors.go rename to vendor/github.com/didip/tollbooth/v7/errors/errors.go diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/github.com/didip/tollbooth/v7/internal/time/AUTHORS similarity index 100% rename from vendor/golang.org/x/sync/AUTHORS rename to vendor/github.com/didip/tollbooth/v7/internal/time/AUTHORS diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/github.com/didip/tollbooth/v7/internal/time/CONTRIBUTORS similarity index 100% rename from vendor/golang.org/x/sync/CONTRIBUTORS rename to vendor/github.com/didip/tollbooth/v7/internal/time/CONTRIBUTORS diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/github.com/didip/tollbooth/v7/internal/time/LICENSE similarity index 100% rename from vendor/golang.org/x/time/LICENSE rename to vendor/github.com/didip/tollbooth/v7/internal/time/LICENSE diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/github.com/didip/tollbooth/v7/internal/time/PATENTS similarity index 100% rename from vendor/golang.org/x/time/PATENTS rename to vendor/github.com/didip/tollbooth/v7/internal/time/PATENTS diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/github.com/didip/tollbooth/v7/internal/time/rate/rate.go similarity index 97% rename from vendor/golang.org/x/time/rate/rate.go rename to vendor/github.com/didip/tollbooth/v7/internal/time/rate/rate.go index b0b982e..6c3b442 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/github.com/didip/tollbooth/v7/internal/time/rate/rate.go @@ -94,6 +94,14 @@ func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } +// TokensAt returns the number of tokens available for the given time. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + // AllowN reports whether n events may happen at time now. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. @@ -306,27 +314,15 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. 
func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() - defer lim.mu.Unlock() if lim.limit == Inf { + lim.mu.Unlock() return Reservation{ ok: true, lim: lim, tokens: n, timeToAct: now, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: now, - } } now, last, tokens := lim.advance(now) @@ -363,6 +359,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio lim.last = last } + lim.mu.Unlock() return r } @@ -388,9 +385,6 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, // durationFromTokens is a unit conversion function from the number of tokens to the duration // of time it takes to accumulate them at a rate of limit tokens per second. func (limit Limit) durationFromTokens(tokens float64) time.Duration { - if limit <= 0 { - return InfDuration - } seconds := tokens / float64(limit) return time.Duration(float64(time.Second) * seconds) } @@ -398,8 +392,5 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { // tokensFromDuration is a unit conversion function from a time duration to the number of tokens // which could be accumulated during that duration at a rate of limit tokens per second. func (limit Limit) tokensFromDuration(d time.Duration) float64 { - if limit <= 0 { - return 0 - } return d.Seconds() * float64(limit) } diff --git a/vendor/github.com/didip/tollbooth/v7/libstring/libstring.go b/vendor/github.com/didip/tollbooth/v7/libstring/libstring.go new file mode 100644 index 0000000..730b654 --- /dev/null +++ b/vendor/github.com/didip/tollbooth/v7/libstring/libstring.go @@ -0,0 +1,101 @@ +// Package libstring provides various string related functions. +package libstring + +import ( + "net" + "net/http" + "strings" +) + +// StringInSlice finds needle in a slice of strings. +func StringInSlice(sliceString []string, needle string) bool { + for _, b := range sliceString { + if b == needle { + return true + } + } + return false +} + +// RemoteIP finds IP Address given http.Request struct. +func RemoteIP(ipLookups []string, forwardedForIndexFromBehind int, r *http.Request) string { + realIP := r.Header.Get("X-Real-IP") + forwardedFor := r.Header.Get("X-Forwarded-For") + + for _, lookup := range ipLookups { + if lookup == "RemoteAddr" { + // 1. Cover the basic use cases for both ipv4 and ipv6 + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + // 2. Upon error, just return the remote addr. + return r.RemoteAddr + } + return ip + } + if lookup == "X-Forwarded-For" && forwardedFor != "" { + // X-Forwarded-For is potentially a list of addresses separated with "," + parts := strings.Split(forwardedFor, ",") + for i, p := range parts { + parts[i] = strings.TrimSpace(p) + } + + partIndex := len(parts) - 1 - forwardedForIndexFromBehind + if partIndex < 0 { + partIndex = 0 + } + + return parts[partIndex] + } + if lookup == "X-Real-IP" && realIP != "" { + return realIP + } + } + + return "" +} + +// CanonicalizeIP returns a form of ip suitable for comparison to other IPs. +// For IPv4 addresses, this is simply the whole string. +// For IPv6 addresses, this is the /64 prefix. 
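+// For example, 2001:db8:1:2:3:4:5:6 and 2001:db8:1:2::1 both canonicalize to 2001:db8:1:2::.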
+func CanonicalizeIP(ip string) string { + isIPv6 := false + // This is how net.ParseIP decides if an address is IPv6 + // https://cs.opensource.google/go/go/+/refs/tags/go1.17.7:src/net/ip.go;l=704 + for i := 0; !isIPv6 && i < len(ip); i++ { + switch ip[i] { + case '.': + // IPv4 + return ip + case ':': + // IPv6 + isIPv6 = true + } + } + if !isIPv6 { + // Not an IP address at all + return ip + } + + // By default, the string representation of a net.IPNet (masked IP address) is just + // "full_address/mask_bits". But using that will result in different addresses with + // the same /64 prefix comparing differently. So we need to zero out the last 64 bits + // so that all IPs in the same prefix will be the same. + // + // Note: When 1.18 is the minimum Go version, this can be written more cleanly like: + // netip.PrefixFrom(netip.MustParseAddr(ipv6), 64).Masked().Addr().String() + // (With appropriate error checking.) + + ipv6 := net.ParseIP(ip) + if ipv6 == nil { + return ip + } + + const bytesToZero = (128 - 64) / 8 + for i := len(ipv6) - bytesToZero; i < len(ipv6); i++ { + ipv6[i] = 0 + } + + // Note that this doesn't have the "/64" suffix customary with a CIDR representation, + // but those three bytes add nothing for us. + return ipv6.String() +} diff --git a/vendor/github.com/didip/tollbooth/v6/limiter/limiter.go b/vendor/github.com/didip/tollbooth/v7/limiter/limiter.go similarity index 96% rename from vendor/github.com/didip/tollbooth/v6/limiter/limiter.go rename to vendor/github.com/didip/tollbooth/v7/limiter/limiter.go index 3d35b9e..c64a7f2 100644 --- a/vendor/github.com/didip/tollbooth/v6/limiter/limiter.go +++ b/vendor/github.com/didip/tollbooth/v7/limiter/limiter.go @@ -7,7 +7,8 @@ import ( "time" cache "github.com/go-pkgz/expirable-cache" - "golang.org/x/time/rate" + + "github.com/didip/tollbooth/v7/internal/time/rate" ) // New is a constructor for Limiter. @@ -374,6 +375,11 @@ func (l *Limiter) RemoveBasicAuthUsers(basicAuthUsers []string) *Limiter { return l } +// DeleteExpiredTokenBuckets is thread-safe way of deleting expired token buckets +func (l *Limiter) DeleteExpiredTokenBuckets() { + l.tokenBuckets.DeleteExpired() +} + // SetHeaders is thread-safe way of setting map of HTTP headers to limit. func (l *Limiter) SetHeaders(headers map[string][]string) *Limiter { if l.headers == nil { @@ -592,3 +598,13 @@ func (l *Limiter) LimitReached(key string) bool { return l.limitReachedWithTokenBucketTTL(key, ttl) } + +// Tokens returns current amount of tokens left in the Bucket identified by key. 
+func (l *Limiter) Tokens(key string) int { + expiringMap, found := l.tokenBuckets.Get(key) + if !found { + return 0 + } + + return int(expiringMap.(*rate.Limiter).TokensAt(time.Now())) +} diff --git a/vendor/github.com/didip/tollbooth/v6/limiter/limiter_options.go b/vendor/github.com/didip/tollbooth/v7/limiter/limiter_options.go similarity index 100% rename from vendor/github.com/didip/tollbooth/v6/limiter/limiter_options.go rename to vendor/github.com/didip/tollbooth/v7/limiter/limiter_options.go diff --git a/vendor/github.com/didip/tollbooth/v6/tollbooth.go b/vendor/github.com/didip/tollbooth/v7/tollbooth.go similarity index 84% rename from vendor/github.com/didip/tollbooth/v6/tollbooth.go rename to vendor/github.com/didip/tollbooth/v7/tollbooth.go index cf4ed98..0dcf82e 100644 --- a/vendor/github.com/didip/tollbooth/v6/tollbooth.go +++ b/vendor/github.com/didip/tollbooth/v7/tollbooth.go @@ -7,9 +7,9 @@ import ( "net/http" "strings" - "github.com/didip/tollbooth/v6/errors" - "github.com/didip/tollbooth/v6/libstring" - "github.com/didip/tollbooth/v6/limiter" + "github.com/didip/tollbooth/v7/errors" + "github.com/didip/tollbooth/v7/libstring" + "github.com/didip/tollbooth/v7/limiter" ) // setResponseHeaders configures X-Rate-Limit-Limit and X-Rate-Limit-Duration @@ -25,6 +25,14 @@ func setResponseHeaders(lmt *limiter.Limiter, w http.ResponseWriter, r *http.Req w.Header().Add("X-Rate-Limit-Request-Remote-Addr", r.RemoteAddr) } +// setRateLimitResponseHeaders configures RateLimit-Limit, RateLimit-Remaining and RateLimit-Reset +// as seen at https://datatracker.ietf.org/doc/html/draft-ietf-httpapi-ratelimit-headers +func setRateLimitResponseHeaders(lmt *limiter.Limiter, w http.ResponseWriter, tokensLeft int) { + w.Header().Add("RateLimit-Limit", fmt.Sprintf("%d", int(math.Round(lmt.GetMax())))) + w.Header().Add("RateLimit-Reset", "1") + w.Header().Add("RateLimit-Remaining", fmt.Sprintf("%d", tokensLeft)) +} + // NewLimiter is a convenience function to limiter.New. func NewLimiter(max float64, tbOptions *limiter.ExpirableOptions) *limiter.Limiter { return limiter.New(tbOptions). @@ -36,11 +44,18 @@ func NewLimiter(max float64, tbOptions *limiter.ExpirableOptions) *limiter.Limit // LimitByKeys keeps track number of request made by keys separated by pipe. // It returns HTTPError when limit is exceeded. func LimitByKeys(lmt *limiter.Limiter, keys []string) *errors.HTTPError { + err, _ := LimitByKeysAndReturn(lmt, keys) + return err +} + +// LimitByKeysAndReturn keeps track number of request made by keys separated by pipe. +// It returns HTTPError when limit is exceeded, and also returns the current limit value. +func LimitByKeysAndReturn(lmt *limiter.Limiter, keys []string) (*errors.HTTPError, int) { if lmt.LimitReached(strings.Join(keys, "|")) { - return &errors.HTTPError{Message: lmt.GetMessage(), StatusCode: lmt.GetStatusCode()} + return &errors.HTTPError{Message: lmt.GetMessage(), StatusCode: lmt.GetStatusCode()}, 0 } - return nil + return nil, lmt.Tokens(strings.Join(keys, "|")) } // ShouldSkipLimiter is a series of filter that decides if request should be limited or not. 
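As an aside on the v7 additions in this file (`DeleteExpiredTokenBuckets`, `Tokens`, `LimitByKeysAndReturn`): below is a minimal, hedged sketch of how a consumer would see them in action; the handler body and port are illustrative assumptions, not part of this diff. `LimitByRequest` (shown further down) sets the draft `RateLimit-*` headers on every response, whether or not the limit was reached.

```go
package main

import (
	"net/http"

	"github.com/didip/tollbooth/v7"
)

func main() {
	lmt := tollbooth.NewLimiter(1, nil) // 1 req/sec, default expirable options

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// LimitByRequest sets RateLimit-Limit/-Remaining/-Reset on w,
		// both on success and when the limit is exceeded
		if httpError := tollbooth.LimitByRequest(lmt, w, r); httpError != nil {
			http.Error(w, httpError.Message, httpError.StatusCode)
			return
		}
		_, _ = w.Write([]byte("ok"))
	})
	_ = http.ListenAndServe(":8080", nil) // illustrative address
}
```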
@@ -49,6 +64,7 @@ func ShouldSkipLimiter(lmt *limiter.Limiter, r *http.Request) bool { // Filter by remote ip // If we are unable to find remoteIP, skip limiter remoteIP := libstring.RemoteIP(lmt.GetIPLookups(), lmt.GetForwardedForIndexFromBehind(), r) + remoteIP = libstring.CanonicalizeIP(remoteIP) if remoteIP == "" { return true } @@ -96,6 +112,10 @@ func ShouldSkipLimiter(lmt *limiter.Limiter, r *http.Request) bool { requestHeadersDefinedInLimiter = false for headerKey, headerValues := range lmtHeaders { + if len(headerValues) == 0 { + requestHeadersDefinedInLimiter = true + continue + } for _, headerValue := range headerValues { if r.Header.Get(headerKey) == headerValue { requestHeadersDefinedInLimiter = true @@ -176,6 +196,7 @@ func ShouldSkipLimiter(lmt *limiter.Limiter, r *http.Request) bool { // BuildKeys generates a slice of keys to rate-limit by given limiter and request structs. func BuildKeys(lmt *limiter.Limiter, r *http.Request) [][]string { remoteIP := libstring.RemoteIP(lmt.GetIPLookups(), lmt.GetForwardedForIndexFromBehind(), r) + remoteIP = libstring.CanonicalizeIP(remoteIP) path := r.URL.Path sliceKeys := make([][]string, 0) @@ -279,14 +300,24 @@ func LimitByRequest(lmt *limiter.Limiter, w http.ResponseWriter, r *http.Request sliceKeys := BuildKeys(lmt, r) + // Get the lowest value over all keys to return in headers. + // Start with high arbitrary number so that any limit returned would be lower and would + // overwrite the value we start with. + var tokensLeft = math.MaxInt32 + // Loop sliceKeys and check if one of them has error. for _, keys := range sliceKeys { - httpError := LimitByKeys(lmt, keys) + httpError, keysLimit := LimitByKeysAndReturn(lmt, keys) + if tokensLeft > keysLimit { + tokensLeft = keysLimit + } if httpError != nil { + setRateLimitResponseHeaders(lmt, w, tokensLeft) return httpError } } + setRateLimitResponseHeaders(lmt, w, tokensLeft) return nil } diff --git a/vendor/github.com/didip/tollbooth_chi/tollbooth_chi.go b/vendor/github.com/didip/tollbooth_chi/tollbooth_chi.go index d433380..7e45c9a 100644 --- a/vendor/github.com/didip/tollbooth_chi/tollbooth_chi.go +++ b/vendor/github.com/didip/tollbooth_chi/tollbooth_chi.go @@ -1,9 +1,10 @@ package tollbooth_chi import ( - "github.com/didip/tollbooth/v6" - "github.com/didip/tollbooth/v6/limiter" "net/http" + + "github.com/didip/tollbooth/v7" + "github.com/didip/tollbooth/v7/limiter" ) func LimitHandler(lmt *limiter.Limiter) func(http.Handler) http.Handler { @@ -32,6 +33,7 @@ func (l *limiterWrapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { default: httpError := tollbooth.LimitByRequest(l.lmt, w, r) if httpError != nil { + l.lmt.ExecOnLimitReached(w, r) w.Header().Add("Content-Type", l.lmt.GetMessageContentType()) w.WriteHeader(httpError.StatusCode) w.Write([]byte(httpError.Message)) diff --git a/vendor/github.com/go-chi/chi/v5/CHANGELOG.md b/vendor/github.com/go-chi/chi/v5/CHANGELOG.md index 88c68c6..a1feeec 100644 --- a/vendor/github.com/go-chi/chi/v5/CHANGELOG.md +++ b/vendor/github.com/go-chi/chi/v5/CHANGELOG.md @@ -1,5 +1,10 @@ # Changelog +## v5.0.8 (2022-12-07) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.7...v5.0.8 + + ## v5.0.7 (2021-11-18) - History of changes: see https://github.com/go-chi/chi/compare/v5.0.6...v5.0.7 diff --git a/vendor/github.com/go-chi/chi/v5/Makefile b/vendor/github.com/go-chi/chi/v5/Makefile index 970a219..e0f18c7 100644 --- a/vendor/github.com/go-chi/chi/v5/Makefile +++ b/vendor/github.com/go-chi/chi/v5/Makefile @@ -1,15 +1,19 @@ 
+.PHONY: all all: @echo "**********************************************************" @echo "** chi build tool **" @echo "**********************************************************" +.PHONY: test test: go clean -testcache && $(MAKE) test-router && $(MAKE) test-middleware +.PHONY: test-router test-router: go test -race -v . +.PHONY: test-middleware test-middleware: go test -race -v ./middleware diff --git a/vendor/github.com/go-chi/chi/v5/README.md b/vendor/github.com/go-chi/chi/v5/README.md index 5d504d1..3e4cc4a 100644 --- a/vendor/github.com/go-chi/chi/v5/README.md +++ b/vendor/github.com/go-chi/chi/v5/README.md @@ -30,7 +30,7 @@ and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too! * **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http` * **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and sub-router mounting * **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts -* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91)) +* **Robust** - in production at Pressly, Cloudflare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91)) * **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown * **Go.mod support** - as of v5, go.mod support (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md)) * **No external dependencies** - plain ol' Go stdlib + net/http diff --git a/vendor/github.com/go-chi/chi/v5/chi.go b/vendor/github.com/go-chi/chi/v5/chi.go index d2e5354..a1691bb 100644 --- a/vendor/github.com/go-chi/chi/v5/chi.go +++ b/vendor/github.com/go-chi/chi/v5/chi.go @@ -1,29 +1,29 @@ -// // Package chi is a small, idiomatic and composable router for building HTTP services. // -// chi requires Go 1.10 or newer. +// chi requires Go 1.14 or newer. // // Example: -// package main // -// import ( -// "net/http" +// package main +// +// import ( +// "net/http" // -// "github.com/go-chi/chi/v5" -// "github.com/go-chi/chi/v5/middleware" -// ) +// "github.com/go-chi/chi/v5" +// "github.com/go-chi/chi/v5/middleware" +// ) // -// func main() { -// r := chi.NewRouter() -// r.Use(middleware.Logger) -// r.Use(middleware.Recoverer) +// func main() { +// r := chi.NewRouter() +// r.Use(middleware.Logger) +// r.Use(middleware.Recoverer) // -// r.Get("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("root.")) -// }) +// r.Get("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("root.")) +// }) // -// http.ListenAndServe(":3333", r) -// } +// http.ListenAndServe(":3333", r) +// } // // See github.com/go-chi/chi/_examples/ for more in-depth examples. // @@ -47,12 +47,12 @@ // placeholder which will match / characters. 
// // Examples: -// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" -// "/user/{name}/info" matches "/user/jsmith/info" -// "/page/*" matches "/page/intro/latest" -// "/page/*/index" also matches "/page/intro/latest" -// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" // +// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" +// "/user/{name}/info" matches "/user/jsmith/info" +// "/page/*" matches "/page/intro/latest" +// "/page/{other}/index" also matches "/page/intro/latest" +// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" package chi import "net/http" diff --git a/vendor/github.com/go-chi/chi/v5/context.go b/vendor/github.com/go-chi/chi/v5/context.go index 814c263..e78a238 100644 --- a/vendor/github.com/go-chi/chi/v5/context.go +++ b/vendor/github.com/go-chi/chi/v5/context.go @@ -121,7 +121,10 @@ func (x *Context) URLParam(key string) string { // } func (x *Context) RoutePattern() string { routePattern := strings.Join(x.RoutePatterns, "") - return replaceWildcards(routePattern) + routePattern = replaceWildcards(routePattern) + routePattern = strings.TrimSuffix(routePattern, "//") + routePattern = strings.TrimSuffix(routePattern, "/") + return routePattern } // replaceWildcards takes a route pattern and recursively replaces all @@ -130,7 +133,6 @@ func replaceWildcards(p string) string { if strings.Contains(p, "/*/") { return replaceWildcards(strings.Replace(p, "/*/", "/", -1)) } - return p } diff --git a/vendor/github.com/go-chi/chi/v5/mux.go b/vendor/github.com/go-chi/chi/v5/mux.go index fff6a3c..47e64cf 100644 --- a/vendor/github.com/go-chi/chi/v5/mux.go +++ b/vendor/github.com/go-chi/chi/v5/mux.go @@ -29,8 +29,8 @@ type Mux struct { // Custom method not allowed handler methodNotAllowedHandler http.HandlerFunc - // Controls the behaviour of middleware chain generation when a mux - // is registered as an inline group inside another mux. + // A reference to the parent mux used by subrouters when mounting + // to a parent mux parent *Mux // Routing context pool @@ -42,6 +42,8 @@ type Mux struct { // The middleware stack middlewares []func(http.Handler) http.Handler + // Controls the behaviour of middleware chain generation when a mux + // is registered as an inline group inside another mux. 
 	inline bool
 }
diff --git a/vendor/github.com/go-pkgz/expirable-cache/.golangci.yml b/vendor/github.com/go-pkgz/expirable-cache/.golangci.yml
index 7442cd4..870d3a5 100644
--- a/vendor/github.com/go-pkgz/expirable-cache/.golangci.yml
+++ b/vendor/github.com/go-pkgz/expirable-cache/.golangci.yml
@@ -1,8 +1,6 @@
 linters-settings:
   govet:
     check-shadowing: true
-  golint:
-    min-confidence: 0
   gocyclo:
     min-complexity: 15
   maligned:
@@ -25,7 +23,7 @@ linters:
   enable:
     - megacheck
-    - golint
+    - revive
     - govet
     - unconvert
     - megacheck
@@ -42,7 +40,7 @@ linters:
    - varcheck
    - stylecheck
    - gochecknoinits
-    - scopelint
+    - exportloopref
    - gocritic
    - nakedret
    - gosimple
@@ -57,8 +55,4 @@ run:
     - vendor
 
 issues:
-  exclude-rules:
-    - text: "should have a package comment, unless it's in another file for this package"
-      linters:
-        - golint
   exclude-use-default: false
diff --git a/vendor/github.com/go-pkgz/expirable-cache/README.md b/vendor/github.com/go-pkgz/expirable-cache/README.md
index e0ab7b7..536e586 100644
--- a/vendor/github.com/go-pkgz/expirable-cache/README.md
+++ b/vendor/github.com/go-pkgz/expirable-cache/README.md
@@ -29,12 +29,12 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/go-pkgz/expirable-cache"
+	"github.com/go-pkgz/expirable-cache/v2"
 )
 
 func main() {
 	// make cache with short TTL and 3 max keys
-	c, _ := cache.NewCache(cache.MaxKeys(3), cache.TTL(time.Millisecond*10))
+	c := cache.NewCache[string, string]().WithMaxKeys(3).WithTTL(time.Millisecond * 10)
 
 	// set value under key1.
 	// with 0 ttl (last parameter) will use cache-wide setting instead (10ms).
diff --git a/vendor/github.com/go-pkgz/expirable-cache/cache.go b/vendor/github.com/go-pkgz/expirable-cache/cache.go
index 46fb7c5..5966a8b 100644
--- a/vendor/github.com/go-pkgz/expirable-cache/cache.go
+++ b/vendor/github.com/go-pkgz/expirable-cache/cache.go
@@ -17,8 +17,6 @@ import (
 	"fmt"
 	"sync"
 	"time"
-
-	"github.com/pkg/errors"
 )
 
 // Cache defines cache interface
@@ -73,7 +71,7 @@ func NewCache(options ...Option) (Cache, error) {
 
 	for _, opt := range options {
 		if err := opt(&res); err != nil {
-			return nil, errors.Wrap(err, "failed to set cache option")
+			return nil, fmt.Errorf("failed to set cache option: %w", err)
 		}
 	}
 	return &res, nil
diff --git a/vendor/github.com/go-pkgz/rest/.golangci.yml b/vendor/github.com/go-pkgz/rest/.golangci.yml
index 824667f..77a0c5d 100644
--- a/vendor/github.com/go-pkgz/rest/.golangci.yml
+++ b/vendor/github.com/go-pkgz/rest/.golangci.yml
@@ -31,17 +31,14 @@ linters:
    - govet
    - unconvert
    - megacheck
-    - structcheck
    - gas
    - gocyclo
    - dupl
    - misspell
    - unparam
-    - varcheck
-    - deadcode
+    - unused
    - typecheck
    - ineffassign
-    - varcheck
    - stylecheck
    - gochecknoinits
    - exportloopref
diff --git a/vendor/github.com/go-pkgz/rest/README.md b/vendor/github.com/go-pkgz/rest/README.md
index c78ddee..b85c999 100644
--- a/vendor/github.com/go-pkgz/rest/README.md
+++ b/vendor/github.com/go-pkgz/rest/README.md
@@ -36,6 +36,45 @@ Org: Umputun
 pong
 ```
 
+### Health middleware
+
+Responds with the status 200 if all health checks passed, 503 if any failed. Both the health path and the check functions are passed by the consumer.
+For production usage this middleware should be used with a throttler/limiter and, optionally, with some auth middlewares.
+
+Example of usage:
+
+```go
+	check1 := func(ctx context.Context) (name string, err error) {
+		// do some check, for example check DB connection
+		return "check1", nil // all good, passed
+	}
+	check2 := func(ctx context.Context) (name string, err error) {
+		// do some other check, for example ping an external service
+		return "check2", errors.New("some error") // check failed
+	}
+
+	router := chi.NewRouter()
+	router.Use(rest.Health("/health", check1, check2))
+```
+
+Example of the actual call and response:
+
+```
+> http GET https://example.com/health
+
+HTTP/1.1 503 Service Unavailable
+Date: Sun, 15 Jul 2018 19:40:31 GMT
+Content-Type: application/json; charset=utf-8
+Content-Length: 36
+
+[
+    {"name":"check1","status":"ok"},
+    {"name":"check2","status":"failed","error":"some error"}
+]
+```
+
+_this middleware is pretty basic, but can be used for simple health checks. For more complex cases, like async/cached health checks, see [alexliesenfeld/health](https://github.com/alexliesenfeld/health)_
+
 ### Logger middleware
 
 Logs request, request handling time and response. Log record fields in order of occurrence:
@@ -93,22 +132,76 @@ Adds the HTTP Deprecation response header, see [draft-dalal-deprecation-header-0
 
 BasicAuth middleware requires basic auth and matches user & passwd with client-provided checker. In case if no basic auth headers returns `StatusUnauthorized`, in case if checker failed - `StatusForbidden`
 
-## Rewrite middleware
+### Rewrite middleware
 
 Rewrites requests with from->to rule. Supports regex (like nginx) and prevents multiple rewrites. For example `Rewrite("^/sites/(.*)/settings/$", "/sites/settings/$1")` will change request's URL from `/sites/id1/settings/` to `/sites/settings/id1`
 
-## NoCache middleware
+### NoCache middleware
 
 Sets a number of HTTP headers to prevent a router (handler's) response from being cached by an upstream proxy and/or client.
 
-## Headers middleware
+### Headers middleware
 
 Sets headers (passed as key:value) to requests. I.e. `rest.Headers("Server:MyServer", "X-Blah:Foo")`
 
-## Gzip middleware
+### Gzip middleware
 
 Compresses response with gzip.
 
+### RealIP middleware
+
+RealIP is a middleware that sets an http.Request's RemoteAddr to the results of parsing either the X-Forwarded-For or X-Real-IP headers.
+
+### Maybe middleware
+
+Maybe middleware will allow you to change the flow of the middleware stack execution depending on return
+value of maybeFn(request). This is useful, for example, if you'd like to skip a middleware handler if
+a request does not satisfy the maybeFn logic.
+
+### Reject middleware
+
+Reject is a middleware that rejects requests with a given status code and message based on a user-defined function.
+This is useful, for example, if you'd like to reject requests to a particular resource based on a request header, or want to implement a conditional request handler based on service parameters.
+
+Example with chi router:
+
+```go
+	router := chi.NewRouter()
+
+	rejectFn := func(r *http.Request) bool {
+		return r.Header.Get("X-Request-Id") == "" // reject if no X-Request-Id header
+	}
+
+	router.Use(rest.Reject(http.StatusBadRequest, "X-Request-Id header is required", rejectFn))
+```
+
+### Benchmarks middleware
+
+Benchmarks middleware allows measuring request handling time and the number of requests per second, and reports aggregated metrics.
+This middleware keeps track of requests in memory and keeps up to 900 points (15 minutes, one data point per second).
+
+To retrieve the data, the user should call the `Stats(d duration)` method, where duration is the time window for which the benchmark data should be returned. It can be any duration from 1s to 15m. Note: all the time data is in microseconds.
+
+Example with chi router:
+
+```go
+	router := chi.NewRouter()
+	bench := rest.NewBenchmarks()
+	router.Use(bench.Handler)
+	...
+	router.Get("/bench", func(w http.ResponseWriter, r *http.Request) {
+		resp := struct {
+			OneMin     rest.BenchmarkStats `json:"1min"`
+			FiveMin    rest.BenchmarkStats `json:"5min"`
+			FifteenMin rest.BenchmarkStats `json:"15min"`
+		}{
+			bench.Stats(time.Minute),
+			bench.Stats(time.Minute * 5),
+			bench.Stats(time.Minute * 15),
+		}
+		render.JSON(w, r, resp)
+	})
+```
+
 ## Helpers
 
 - `rest.Wrap` - converts a list of middlewares to nested handlers calls (in reverse order)
@@ -119,6 +212,8 @@ Compresses response with gzip.
 - `rest.SendErrorJSON` - makes `{error: blah, details: blah}` json body and responds with given error code. Also, adds context to the logged message
 - `rest.NewErrorLogger` - creates a struct providing shorter form of logger call
 - `rest.FileServer` - creates a file server for static assets with directory listing disabled
+- `realip.Get` - returns client's IP address
+- `rest.ParseFromTo` - parses "from" and "to" request query params with various formats
 
 ## Profiler
 
@@ -133,5 +228,6 @@ Profiler is a convenient subrouter used for mounting net/http/pprof, i.e.
 		return r
 	}
 ```
-It exposes a whole bunch of `/pprof/*` endpoints as well as `/vars`. Builtin support for `onlyIps` allows to restrict access, which is important if it runs on a publicly exposed port. However, counting on IP check only is not that reliable way to limit request and for production use it would be better to add some sort of auth (for example provided `BasicAuth` middleware) or run with a separate http server, exposed to internal ip/port only.
+
+It exposes a bunch of `/pprof/*` endpoints as well as `/vars`. Builtin support for `onlyIps` allows restricting access, which is important if it runs on a publicly exposed port. However, an IP check alone is not a reliable way to limit requests, and for production use it would be better to add some sort of auth (for example, the provided `BasicAuth` middleware) or run with a separate http server, exposed to an internal ip/port only.
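Since the go-pkgz/rest README snippets above lean on chi, here is a hedged, dependency-free sketch wiring several of the new middlewares with `rest.Wrap` from the helpers list; the handler and the `dbCheck` function are illustrative assumptions, not part of this diff.

```go
package main

import (
	"context"
	"net/http"

	"github.com/go-pkgz/rest"
)

func main() {
	// hypothetical check; a real one would ping a DB or an external service
	dbCheck := func(ctx context.Context) (name string, err error) { return "db", nil }

	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello from " + r.RemoteAddr)) // RemoteAddr rewritten by RealIP
	})

	handler := rest.Wrap(h,
		rest.RealIP,                     // trust X-Forwarded-For / X-Real-IP
		rest.Health("/health", dbCheck), // 200/503 JSON health endpoint
	)
	_ = http.ListenAndServe(":8080", handler) // illustrative address
}
```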
diff --git a/vendor/github.com/go-pkgz/rest/basic_auth.go b/vendor/github.com/go-pkgz/rest/basic_auth.go
index 6ed0c68..f5d7777 100644
--- a/vendor/github.com/go-pkgz/rest/basic_auth.go
+++ b/vendor/github.com/go-pkgz/rest/basic_auth.go
@@ -1,9 +1,13 @@
 package rest
 
 import (
+	"context"
+	"crypto/subtle"
 	"net/http"
 )
 
+const baContextKey = "authorizedWithBasicAuth"
+
 // BasicAuth middleware requires basic auth and matches user & passwd with client-provided checker
 func BasicAuth(checker func(user, passwd string) bool) func(http.Handler) http.Handler {
 
@@ -19,8 +23,25 @@ func BasicAuth(checker func(user, passwd string) bool) func(http.Handler) http.H
 				w.WriteHeader(http.StatusForbidden)
 				return
 			}
-			h.ServeHTTP(w, r)
+			h.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), contextKey(baContextKey), true)))
 		}
 		return http.HandlerFunc(fn)
 	}
 }
+
+// BasicAuthWithUserPasswd middleware requires basic auth and matches user & passwd with client-provided values
+func BasicAuthWithUserPasswd(user, passwd string) func(http.Handler) http.Handler {
+	checkFn := func(reqUser, reqPasswd string) bool {
+		matchUser := subtle.ConstantTimeCompare([]byte(user), []byte(reqUser))
+		matchPass := subtle.ConstantTimeCompare([]byte(passwd), []byte(reqPasswd))
+		return matchUser == 1 && matchPass == 1
+	}
+	return BasicAuth(checkFn)
+}
+
+// IsAuthorized returns true if the user is authorized.
+// It can be used in handlers to check if BasicAuth middleware was applied
+func IsAuthorized(ctx context.Context) bool {
+	v := ctx.Value(contextKey(baContextKey))
+	return v != nil && v.(bool)
+}
diff --git a/vendor/github.com/go-pkgz/rest/benchmarks.go b/vendor/github.com/go-pkgz/rest/benchmarks.go
new file mode 100644
index 0000000..702e517
--- /dev/null
+++ b/vendor/github.com/go-pkgz/rest/benchmarks.go
@@ -0,0 +1,159 @@
+package rest
+
+import (
+	"container/list"
+	"net/http"
+	"sync"
+	"time"
+)
+
+var maxTimeRangeDefault = time.Duration(15) * time.Minute
+
+// Benchmarks is a basic benchmarking middleware collecting and reporting performance metrics.
+// It keeps track of the request speeds and counts in 1s benchData buckets, limiting the number of buckets
+// to maxTimeRange. The user can request the benchmark for any time duration. This is intended to be used
+// for retrieving the benchmark data for the last minute, 5 minutes and up to maxTimeRange.
+type Benchmarks struct {
+	st           time.Time
+	data         *list.List
+	lock         sync.RWMutex
+	maxTimeRange time.Duration
+
+	nowFn func() time.Time // for testing only
+}
+
+type benchData struct {
+	// 1s aggregates
+	requests    int
+	respTime    time.Duration
+	minRespTime time.Duration
+	maxRespTime time.Duration
+	ts          time.Time
+}
+
+// BenchmarkStats holds the stats for a given interval
+type BenchmarkStats struct {
+	Requests        int     `json:"requests"`
+	RequestsSec     float64 `json:"requests_sec"`
+	AverageRespTime int64   `json:"average_resp_time"`
+	MinRespTime     int64   `json:"min_resp_time"`
+	MaxRespTime     int64   `json:"max_resp_time"`
+}
+
+// NewBenchmarks creates a new benchmark middleware
+func NewBenchmarks() *Benchmarks {
+	res := &Benchmarks{
+		st:           time.Now(),
+		data:         list.New(),
+		nowFn:        time.Now,
+		maxTimeRange: maxTimeRangeDefault,
+	}
+	return res
+}
+
+// WithTimeRange sets the maximum time range for the benchmark to keep data for.
+// Default is 15 minutes. An increase of this range will change memory utilization, as each second of the range
+// is kept as a benchData aggregate. The default means 15*60 = 900 seconds of data aggregate.
+// A larger range allows for longer time periods to be benchmarked.
+func (b *Benchmarks) WithTimeRange(max time.Duration) *Benchmarks {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+	b.maxTimeRange = max
+	return b
+}
+
+// Handler calculates 1/5/10m requests per second and allows access to those values
+func (b *Benchmarks) Handler(next http.Handler) http.Handler {
+
+	fn := func(w http.ResponseWriter, r *http.Request) {
+		st := b.nowFn()
+		defer func() {
+			b.update(time.Since(st))
+		}()
+		next.ServeHTTP(w, r)
+	}
+	return http.HandlerFunc(fn)
+}
+
+func (b *Benchmarks) update(reqDuration time.Duration) {
+	now := b.nowFn().Truncate(time.Second)
+
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	// keep maxTimeRange in the list, drop the rest
+	for e := b.data.Front(); e != nil; e = e.Next() {
+		if b.data.Front().Value.(benchData).ts.After(b.nowFn().Add(-b.maxTimeRange)) {
+			break
+		}
+		b.data.Remove(b.data.Front())
+	}
+
+	last := b.data.Back()
+	if last == nil || last.Value.(benchData).ts.Before(now) {
+		b.data.PushBack(benchData{requests: 1, respTime: reqDuration, ts: now,
+			minRespTime: reqDuration, maxRespTime: reqDuration})
+		return
+	}
+
+	bd := last.Value.(benchData)
+	bd.requests++
+	bd.respTime += reqDuration
+
+	if bd.minRespTime == 0 || reqDuration < bd.minRespTime {
+		bd.minRespTime = reqDuration
+	}
+	if bd.maxRespTime == 0 || reqDuration > bd.maxRespTime {
+		bd.maxRespTime = reqDuration
+	}
+
+	last.Value = bd
+}
+
+// Stats returns the current benchmark stats for the given duration
+func (b *Benchmarks) Stats(interval time.Duration) BenchmarkStats {
+	if interval < time.Second { // minimum interval is 1s due to the bucket size
+		return BenchmarkStats{}
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	var (
+		requests int
+		respTime time.Duration
+	)
+
+	stInterval, fnInterval := time.Time{}, time.Time{}
+	var minRespTime, maxRespTime time.Duration
+	for e := b.data.Back(); e != nil; e = e.Prev() { // reverse order
+		bd := e.Value.(benchData)
+		if bd.ts.Before(b.nowFn().Add(-interval)) {
+			break
+		}
+		if minRespTime == 0 || bd.minRespTime < minRespTime {
+			minRespTime = bd.minRespTime
+		}
+		if maxRespTime == 0 || bd.maxRespTime > maxRespTime {
+			maxRespTime = bd.maxRespTime
+		}
+		requests += bd.requests
+		respTime += bd.respTime
+		if fnInterval.IsZero() {
+			fnInterval = bd.ts.Add(time.Second)
+		}
+		stInterval = bd.ts
+	}
+
+	if requests == 0 {
+		return BenchmarkStats{}
+	}
+
+	return BenchmarkStats{
+		Requests:        requests,
+		RequestsSec:     float64(requests) / (fnInterval.Sub(stInterval).Seconds()),
+		AverageRespTime: respTime.Microseconds() / int64(requests),
+		MinRespTime:     minRespTime.Microseconds(),
+		MaxRespTime:     maxRespTime.Microseconds(),
+	}
+}
diff --git a/vendor/github.com/go-pkgz/rest/logger/logger.go b/vendor/github.com/go-pkgz/rest/logger/logger.go
index cc7d9e5..dafdd3b 100644
--- a/vendor/github.com/go-pkgz/rest/logger/logger.go
+++ b/vendor/github.com/go-pkgz/rest/logger/logger.go
@@ -14,6 +14,8 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/go-pkgz/rest/realip"
 )
 
 // Middleware is a logger for rest requests.
@@ -103,7 +105,10 @@ func (l *Middleware) Handler(next http.Handler) http.Handler {
 			rawurl = unescURL
 		}
 
-		remoteIP := l.remoteIP(r)
+		remoteIP, err := realip.Get(r)
+		if err != nil {
+			remoteIP = "unknown ip"
+		}
 		if l.ipFn != nil { // mask ip with ipFn
 			remoteIP = l.ipFn(remoteIP)
 		}
@@ -169,7 +174,7 @@ func (l *Middleware) formatDefault(r *http.Request, p *logParts) string {
 }
 
 // 127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "http://www.example.com/start.html" "Mozilla/4.08 [en] (Win98; I ;Nav)"
-//nolint gosec
+// nolint gosec
 func (l *Middleware) formatApacheCombined(r *http.Request, p *logParts) string {
 	username := "-"
 	if p.user != "" {
@@ -291,17 +296,19 @@ func (l *Middleware) sanitizeQuery(rawQuery string) string {
 	return query.Encode()
 }
 
-// remoteIP gets address from X-Forwarded-For and than from request's remote address
-func (l *Middleware) remoteIP(r *http.Request) (remoteIP string) {
-
-	if remoteIP = r.Header.Get("X-Forwarded-For"); remoteIP == "" {
-		remoteIP = r.RemoteAddr
+// AnonymizeIP is a function to reset the last part of an IPv4 address to 0.
+// From 123.212.12.78 it will make 123.212.12.0
+func AnonymizeIP(ip string) string {
+	if ip == "" {
+		return ""
 	}
-	remoteIP = strings.Split(remoteIP, ":")[0]
-	if strings.HasPrefix(remoteIP, "[") {
-		remoteIP = strings.Split(remoteIP, "]:")[0] + "]"
+
+	parts := strings.Split(ip, ".")
+	if len(parts) != 4 {
+		return ip
 	}
-	return remoteIP
+
+	return strings.Join(parts[:3], ".") + ".0"
 }
 
 // customResponseWriter is an HTTP response logger that keeps HTTP status code and
diff --git a/vendor/github.com/go-pkgz/rest/middleware.go b/vendor/github.com/go-pkgz/rest/middleware.go
index 7e09e7e..6e50337 100644
--- a/vendor/github.com/go-pkgz/rest/middleware.go
+++ b/vendor/github.com/go-pkgz/rest/middleware.go
@@ -1,12 +1,14 @@
 package rest
 
 import (
+	"context"
 	"net/http"
 	"os"
 	"runtime/debug"
 	"strings"
 
 	"github.com/go-pkgz/rest/logger"
+	"github.com/go-pkgz/rest/realip"
 )
 
 // Wrap converts a list of middlewares to nested calls (in reverse order)
@@ -53,6 +55,47 @@ func Ping(next http.Handler) http.Handler {
 	return http.HandlerFunc(fn)
 }
 
+// Health middleware responds with health info and status (200 if healthy). Stops the chain if a health request is detected.
+// The passed checkers implement custom health checks and return an error if a check failed. The check has to return the name
+// regardless of the error state.
+// For production usage this middleware should be used with a throttler and, optionally, with BasicAuth middlewares.
+func Health(path string, checkers ...func(ctx context.Context) (name string, err error)) func(http.Handler) http.Handler {
+
+	type hr struct {
+		Name   string `json:"name"`
+		Status string `json:"status"`
+		Error  string `json:"error,omitempty"`
+	}
+
+	return func(h http.Handler) http.Handler {
+		fn := func(w http.ResponseWriter, r *http.Request) {
+			if r.Method != "GET" || !strings.EqualFold(r.URL.Path, path) {
+				h.ServeHTTP(w, r) // not the health check request, continue the chain
+				return
+			}
+			resp := []hr{}
+			var anyError bool
+			for _, check := range checkers {
+				name, err := check(r.Context())
+				hh := hr{Name: name, Status: "ok"}
+				if err != nil {
+					hh.Status = "failed"
+					hh.Error = err.Error()
+					anyError = true
+				}
+				resp = append(resp, hh)
+			}
+			if anyError {
+				w.WriteHeader(http.StatusServiceUnavailable)
+			} else {
+				w.WriteHeader(http.StatusOK)
+			}
+			RenderJSON(w, resp)
+		}
+		return http.HandlerFunc(fn)
+	}
+}
+
 // Recoverer is a middleware that recovers from panics, logs the panic and returns a HTTP 500 status if possible.
 func Recoverer(l logger.Backend) func(http.Handler) http.Handler {
 	return func(h http.Handler) http.Handler {
@@ -93,7 +136,7 @@ func Headers(headers ...string) func(http.Handler) http.Handler {
 
 // Maybe middleware will allow you to change the flow of the middleware stack execution depending on return
 // value of maybeFn(request). This is useful for example if you'd like to skip a middleware handler if
-// a request does not satisfied the maybeFn logic.
+// a request does not satisfy the maybeFn logic.
 // borrowed from https://github.com/go-chi/chi/blob/master/middleware/maybe.go
 func Maybe(mw func(http.Handler) http.Handler, maybeFn func(r *http.Request) bool) func(http.Handler) http.Handler {
 	return func(next http.Handler) http.Handler {
@@ -106,3 +149,36 @@ func Maybe(mw func(http.Handler) http.Handler, maybeFn func(r *http.Request) boo
 		})
 	}
 }
+
+// RealIP is a middleware that sets an http.Request's RemoteAddr to the results
+// of parsing either the X-Forwarded-For or X-Real-IP headers.
+//
+// This middleware should only be used if the user can trust the headers sent with the request.
+// If reverse proxies are configured to pass along arbitrary header values from the client,
+// or if this middleware is used without a reverse proxy, malicious clients could set anything
+// as the X-Forwarded-For header and attack the server in various ways.
+func RealIP(h http.Handler) http.Handler {
+	fn := func(w http.ResponseWriter, r *http.Request) {
+		if rip, err := realip.Get(r); err == nil {
+			r.RemoteAddr = rip
+		}
+		h.ServeHTTP(w, r)
+	}
+
+	return http.HandlerFunc(fn)
+}
+
+// Reject is a middleware that conditionally rejects requests with a given status code and message.
+// The user-defined condition function rejectFn is used to determine if the request should be rejected.
+func Reject(errCode int, errMsg string, rejectFn func(r *http.Request) bool) func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + if rejectFn(r) { + http.Error(w, errMsg, errCode) + return + } + h.ServeHTTP(w, r) + } + return http.HandlerFunc(fn) + } +} diff --git a/vendor/github.com/go-pkgz/rest/realip/real.go b/vendor/github.com/go-pkgz/rest/realip/real.go new file mode 100644 index 0000000..c30e3d4 --- /dev/null +++ b/vendor/github.com/go-pkgz/rest/realip/real.go @@ -0,0 +1,80 @@ +// Package realip extracts a real IP address from the request. +package realip + +import ( + "bytes" + "fmt" + "net" + "net/http" + "strings" +) + +type ipRange struct { + start net.IP + end net.IP +} + +var privateRanges = []ipRange{ + {start: net.ParseIP("10.0.0.0"), end: net.ParseIP("10.255.255.255")}, + {start: net.ParseIP("100.64.0.0"), end: net.ParseIP("100.127.255.255")}, + {start: net.ParseIP("172.16.0.0"), end: net.ParseIP("172.31.255.255")}, + {start: net.ParseIP("192.0.0.0"), end: net.ParseIP("192.0.0.255")}, + {start: net.ParseIP("192.168.0.0"), end: net.ParseIP("192.168.255.255")}, + {start: net.ParseIP("198.18.0.0"), end: net.ParseIP("198.19.255.255")}, +} + +// Get returns real ip from the given request +func Get(r *http.Request) (string, error) { + + for _, h := range []string{"X-Forwarded-For", "X-Real-Ip"} { + addresses := strings.Split(r.Header.Get(h), ",") + // march from right to left until we get a public address + // that will be the address right before our proxy. + for i := len(addresses) - 1; i >= 0; i-- { + ip := strings.TrimSpace(addresses[i]) + realIP := net.ParseIP(ip) + if !realIP.IsGlobalUnicast() || isPrivateSubnet(realIP) { + continue + } + return ip, nil + } + } + + // X-Forwarded-For header set but parsing failed above + if r.Header.Get("X-Forwarded-For") != "" { + return "", fmt.Errorf("no valid ip found") + } + + // get IP from RemoteAddr + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return "", fmt.Errorf("can't parse ip %q: %w", r.RemoteAddr, err) + } + if netIP := net.ParseIP(ip); netIP == nil { + return "", fmt.Errorf("no valid ip found") + } + + return ip, nil +} + +// inRange - check to see if a given ip address is within a range given +func inRange(r ipRange, ipAddress net.IP) bool { + // strcmp type byte comparison + if bytes.Compare(ipAddress, r.start) >= 0 && bytes.Compare(ipAddress, r.end) < 0 { + return true + } + return false +} + +// isPrivateSubnet - check to see if this ip is in a private subnet +func isPrivateSubnet(ipAddress net.IP) bool { + if ipCheck := ipAddress.To4(); ipCheck != nil { + for _, r := range privateRanges { + // check if this ip is in a private range + if inRange(r, ipAddress) { + return true + } + } + } + return false +} diff --git a/vendor/github.com/go-pkgz/rest/rest.go b/vendor/github.com/go-pkgz/rest/rest.go index 82361bc..1b321ec 100644 --- a/vendor/github.com/go-pkgz/rest/rest.go +++ b/vendor/github.com/go-pkgz/rest/rest.go @@ -4,9 +4,9 @@ package rest import ( "bytes" "encoding/json" + "fmt" "net/http" - - "github.com/pkg/errors" + "time" ) // JSON is a map alias, just for convenience @@ -29,7 +29,7 @@ func RenderJSON(w http.ResponseWriter, data interface{}) { func RenderJSONFromBytes(w http.ResponseWriter, r *http.Request, data []byte) error { w.Header().Set("Content-Type", "application/json; charset=utf-8") if _, err := w.Write(data); err != nil { - return errors.Wrapf(err, "failed to send response to %s", r.RemoteAddr) + 
return fmt.Errorf("failed to send response to %s: %w", r.RemoteAddr, err) } return nil } @@ -42,7 +42,7 @@ func RenderJSONWithHTML(w http.ResponseWriter, r *http.Request, v interface{}) e enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) if err := enc.Encode(v); err != nil { - return nil, errors.Wrap(err, "json encoding failed") + return nil, fmt.Errorf("json encoding failed: %w", err) } return buf.Bytes(), nil } @@ -67,3 +67,33 @@ func renderJSONWithStatus(w http.ResponseWriter, data interface{}, code int) { w.WriteHeader(code) _, _ = w.Write(buf.Bytes()) } + +// ParseFromTo parses from and to query params of the request +func ParseFromTo(r *http.Request) (from, to time.Time, err error) { + parseTimeStamp := func(ts string) (time.Time, error) { + formats := []string{ + "2006-01-02T15:04:05.000000000", + "2006-01-02T15:04:05", + "2006-01-02T15:04", + "20060102", + time.RFC3339, + time.RFC3339Nano, + } + + for _, f := range formats { + if t, e := time.Parse(f, ts); e == nil { + return t, nil + } + } + return time.Time{}, fmt.Errorf("can't parse date %q", ts) + } + + if from, err = parseTimeStamp(r.URL.Query().Get("from")); err != nil { + return from, to, fmt.Errorf("incorrect from time: %w", err) + } + + if to, err = parseTimeStamp(r.URL.Query().Get("to")); err != nil { + return from, to, fmt.Errorf("incorrect to time: %w", err) + } + return from, to, nil +} diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md deleted file mode 100644 index f11cccc..0000000 --- a/vendor/github.com/go-stack/stack/README.md +++ /dev/null @@ -1,38 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) -[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) -[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) -[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) - -# stack - -Package stack implements utilities to capture, manipulate, and format call -stacks. It provides a simpler API than package runtime. - -The implementation takes care of the minutia and special cases of interpreting -the program counter (pc) values returned by runtime.Callers. - -## Versioning - -Package stack publishes releases via [semver](http://semver.org/) compatible Git -tags prefixed with a single 'v'. The master branch always contains the latest -release. The develop branch contains unreleased commits. - -## Formatting - -Package stack's types implement fmt.Formatter, which provides a simple and -flexible way to declaratively configure formatting when used with logging or -error tracking packages. - -```go -func DoTheThing() { - c := stack.Caller(0) - log.Print(c) // "source.go:10" - log.Printf("%+v", c) // "pkg/path/source.go:10" - log.Printf("%n", c) // "DoTheThing" - - s := stack.Trace().TrimRuntime() - log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" -} -``` - -See the docs for all of the supported formatting options. diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go deleted file mode 100644 index ac3b93b..0000000 --- a/vendor/github.com/go-stack/stack/stack.go +++ /dev/null @@ -1,400 +0,0 @@ -// +build go1.7 - -// Package stack implements utilities to capture, manipulate, and format call -// stacks. It provides a simpler API than package runtime. 
-// -// The implementation takes care of the minutia and special cases of -// interpreting the program counter (pc) values returned by runtime.Callers. -// -// Package stack's types implement fmt.Formatter, which provides a simple and -// flexible way to declaratively configure formatting when used with logging -// or error tracking packages. -package stack - -import ( - "bytes" - "errors" - "fmt" - "io" - "runtime" - "strconv" - "strings" -) - -// Call records a single function invocation from a goroutine stack. -type Call struct { - frame runtime.Frame -} - -// Caller returns a Call from the stack of the current goroutine. The argument -// skip is the number of stack frames to ascend, with 0 identifying the -// calling function. -func Caller(skip int) Call { - // As of Go 1.9 we need room for up to three PC entries. - // - // 0. An entry for the stack frame prior to the target to check for - // special handling needed if that prior entry is runtime.sigpanic. - // 1. A possible second entry to hold metadata about skipped inlined - // functions. If inline functions were not skipped the target frame - // PC will be here. - // 2. A third entry for the target frame PC when the second entry - // is used for skipped inline functions. - var pcs [3]uintptr - n := runtime.Callers(skip+1, pcs[:]) - frames := runtime.CallersFrames(pcs[:n]) - frame, _ := frames.Next() - frame, _ = frames.Next() - - return Call{ - frame: frame, - } -} - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c). -func (c Call) String() string { - return fmt.Sprint(c) -} - -// MarshalText implements encoding.TextMarshaler. It formats the Call the same -// as fmt.Sprintf("%v", c). -func (c Call) MarshalText() ([]byte, error) { - if c.frame == (runtime.Frame{}) { - return nil, ErrNoFunc - } - - buf := bytes.Buffer{} - fmt.Fprint(&buf, c) - return buf.Bytes(), nil -} - -// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely -// cause is a Call with the zero value. -var ErrNoFunc = errors.New("no call stack information") - -// Format implements fmt.Formatter with support for the following verbs. -// -// %s source file -// %d line number -// %n function name -// %k last segment of the package path -// %v equivalent to %s:%d -// -// It accepts the '+' and '#' flags for most of the verbs as follows. -// -// %+s path of source file relative to the compile time GOPATH, -// or the module path joined to the path of source file relative -// to module root -// %#s full path of source file -// %+n import path qualified function name -// %+k full package path -// %+v equivalent to %+s:%d -// %#v equivalent to %#s:%d -func (c Call) Format(s fmt.State, verb rune) { - if c.frame == (runtime.Frame{}) { - fmt.Fprintf(s, "%%!%c(NOFUNC)", verb) - return - } - - switch verb { - case 's', 'v': - file := c.frame.File - switch { - case s.Flag('#'): - // done - case s.Flag('+'): - file = pkgFilePath(&c.frame) - default: - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - } - io.WriteString(s, file) - if verb == 'v' { - buf := [7]byte{':'} - s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10)) - } - - case 'd': - buf := [6]byte{} - s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10)) - - case 'k': - name := c.frame.Function - const pathSep = "/" - start, end := 0, len(name) - if i := strings.LastIndex(name, pathSep); i != -1 { - start = i + len(pathSep) - } - const pkgSep = "." 
- if i := strings.Index(name[start:], pkgSep); i != -1 { - end = start + i - } - if s.Flag('+') { - start = 0 - } - io.WriteString(s, name[start:end]) - - case 'n': - name := c.frame.Function - if !s.Flag('+') { - const pathSep = "/" - if i := strings.LastIndex(name, pathSep); i != -1 { - name = name[i+len(pathSep):] - } - const pkgSep = "." - if i := strings.Index(name, pkgSep); i != -1 { - name = name[i+len(pkgSep):] - } - } - io.WriteString(s, name) - } -} - -// Frame returns the call frame infomation for the Call. -func (c Call) Frame() runtime.Frame { - return c.frame -} - -// PC returns the program counter for this call frame; multiple frames may -// have the same PC value. -// -// Deprecated: Use Call.Frame instead. -func (c Call) PC() uintptr { - return c.frame.PC -} - -// CallStack records a sequence of function invocations from a goroutine -// stack. -type CallStack []Call - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs). -func (cs CallStack) String() string { - return fmt.Sprint(cs) -} - -var ( - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - spaceBytes = []byte(" ") -) - -// MarshalText implements encoding.TextMarshaler. It formats the CallStack the -// same as fmt.Sprintf("%v", cs). -func (cs CallStack) MarshalText() ([]byte, error) { - buf := bytes.Buffer{} - buf.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - buf.Write(spaceBytes) - } - fmt.Fprint(&buf, pc) - } - buf.Write(closeBracketBytes) - return buf.Bytes(), nil -} - -// Format implements fmt.Formatter by printing the CallStack as square brackets -// ([, ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. -func (cs CallStack) Format(s fmt.State, verb rune) { - s.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - s.Write(spaceBytes) - } - pc.Format(s, verb) - } - s.Write(closeBracketBytes) -} - -// Trace returns a CallStack for the current goroutine with element 0 -// identifying the calling function. -func Trace() CallStack { - var pcs [512]uintptr - n := runtime.Callers(1, pcs[:]) - - frames := runtime.CallersFrames(pcs[:n]) - cs := make(CallStack, 0, n) - - // Skip extra frame retrieved just to make sure the runtime.sigpanic - // special case is handled. - frame, more := frames.Next() - - for more { - frame, more = frames.Next() - cs = append(cs, Call{frame: frame}) - } - - return cs -} - -// TrimBelow returns a slice of the CallStack with all entries below c -// removed. -func (cs CallStack) TrimBelow(c Call) CallStack { - for len(cs) > 0 && cs[0] != c { - cs = cs[1:] - } - return cs -} - -// TrimAbove returns a slice of the CallStack with all entries above c -// removed. -func (cs CallStack) TrimAbove(c Call) CallStack { - for len(cs) > 0 && cs[len(cs)-1] != c { - cs = cs[:len(cs)-1] - } - return cs -} - -// pkgIndex returns the index that results in file[index:] being the path of -// file relative to the compile time GOPATH, and file[:index] being the -// $GOPATH/src/ portion of file. funcName must be the name of a function in -// file as returned by runtime.Func.Name. -func pkgIndex(file, funcName string) int { - // As of Go 1.6.2 there is no direct way to know the compile time GOPATH - // at runtime, but we can infer the number of path segments in the GOPATH. - // We note that runtime.Func.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. 
Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // file[:idx] == /home/user/src/ - // file[idx:] == pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired result for file[idx:]. We count separators from the - // end of the file path until it finds two more than in the function name - // and then move one character forward to preserve the initial path - // segment without a leading separator. - const sep = "/" - i := len(file) - for n := strings.Count(funcName, sep) + 2; n > 0; n-- { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - return i + len(sep) -} - -// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, -// or its module path joined to its path relative to the module root. -// -// As of Go 1.11 there is no direct way to know the compile time GOPATH or -// module paths at runtime, but we can piece together the desired information -// from available information. We note that runtime.Frame.Function contains the -// function name qualified by the package path, which includes the module path -// but not the GOPATH. We can extract the package path from that and append the -// last segments of the file path to arrive at the desired package qualified -// file path. For example, given: -// -// GOPATH /home/user -// import path pkg/sub -// frame.File /home/user/src/pkg/sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/sub/file.go -// -// It appears that we simply need to trim ".Type.Method" from frame.Function and -// append "/" + path.Base(file). -// -// But there are other wrinkles. Although it is idiomatic to do so, the internal -// name of a package is not required to match the last segment of its import -// path. In addition, the introduction of modules in Go 1.11 allows working -// without a GOPATH. So we also must make these work right: -// -// GOPATH /home/user -// import path pkg/go-sub -// package name sub -// frame.File /home/user/src/pkg/go-sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/go-sub/file.go -// -// Module path pkg/v2 -// import path pkg/v2/go-sub -// package name sub -// frame.File /home/user/cloned-pkg/go-sub/file.go -// frame.Function pkg/v2/sub.Type.Method -// Desired return pkg/v2/go-sub/file.go -// -// We can handle all of these situations by using the package path extracted -// from frame.Function up to, but not including, the last segment as the prefix -// and the last two segments of frame.File as the suffix of the returned path. -// This preserves the existing behavior when working in a GOPATH without modules -// and a semantically equivalent behavior when used in module aware project. -func pkgFilePath(frame *runtime.Frame) string { - pre := pkgPrefix(frame.Function) - post := pathSuffix(frame.File) - if pre == "" { - return post - } - return pre + "/" + post -} - -// pkgPrefix returns the import path of the function's package with the final -// segment removed. 
-func pkgPrefix(funcName string) string { - const pathSep = "/" - end := strings.LastIndex(funcName, pathSep) - if end == -1 { - return "" - } - return funcName[:end] -} - -// pathSuffix returns the last two segments of path. -func pathSuffix(path string) string { - const pathSep = "/" - lastSep := strings.LastIndex(path, pathSep) - if lastSep == -1 { - return path - } - return path[strings.LastIndex(path[:lastSep], pathSep)+1:] -} - -var runtimePath string - -func init() { - var pcs [3]uintptr - runtime.Callers(0, pcs[:]) - frames := runtime.CallersFrames(pcs[:]) - frame, _ := frames.Next() - file := frame.File - - idx := pkgIndex(frame.File, frame.Function) - - runtimePath = file[:idx] - if runtime.GOOS == "windows" { - runtimePath = strings.ToLower(runtimePath) - } -} - -func inGoroot(c Call) bool { - file := c.frame.File - if len(file) == 0 || file[0] == '?' { - return true - } - if runtime.GOOS == "windows" { - file = strings.ToLower(file) - } - return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") -} - -// TrimRuntime returns a slice of the CallStack with the topmost entries from -// the go runtime removed. It considers any calls originating from unknown -// files, files under GOROOT, or _testmain.go as part of the runtime. -func (cs CallStack) TrimRuntime() CallStack { - for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { - cs = cs[:len(cs)-1] - } - return cs -} diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore index b35f844..d31b378 100644 --- a/vendor/github.com/klauspost/compress/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -23,3 +23,10 @@ _testmain.go *.test *.prof /s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 0af08e6..a2bf06e 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@latest + - go install mvdan.cc/garble@v0.7.2 builds: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index e8ff994..63f2cd5 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -9,7 +9,6 @@ This package provides various compression algorithms. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) @@ -17,13 +16,159 @@ This package provides various compression algorithms. 
# changelog +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593
+ * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+ * flate: In-place hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+ * huff0: Decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+ * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+ * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+ * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+ * zstd: Allow ignoring checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+ * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+ * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+ * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+ * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+ * Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+ * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+ * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+ * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+ * zstd: Handle zero-sized frame content size more strictly in [#521](https://github.com/klauspost/compress/pull/521)
+ * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. Typical streams will fully use two cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+
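The synchronous mode described in the notes above and the `MaxEncodedSize` helper from v1.15.13 can be combined roughly as follows. This is an editor-added sketch, not part of the upstream diff; it uses only the documented `zstd` API (`WithEncoderConcurrency`, `WithDecoderConcurrency`, `MaxEncodedSize`, `EncodeAll`, `DecodeAll`):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Concurrency 1 selects the synchronous path: no goroutines are spawned.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := bytes.Repeat([]byte("synchronous zstd "), 100)

	// MaxEncodedSize (added in v1.15.13) bounds the worst-case output,
	// so the destination can be allocated once up front.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	compressed := enc.EncodeAll(src, dst)

	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, out)) // true
}
```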
+ See changes to v1.14.x
+
+* Feb 22, 2022 (v1.14.4)
+ * flate: Fix rare Huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+ * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+ * zip: Don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+ * huff0: Use static decompression buffer, up to 30% faster, by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+ * flate: Improve fastest levels' compression speed, ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+ * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+ * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+ * zstd: Improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+ * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+ * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+ * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+ * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+ * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
+* Jan 11, 2022 (v1.14.1)
+ * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+ * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+ * zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+ * zstd: Add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+ * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
+
+ See changes to v1.13.x
+
* Aug 30, 2021 (v1.13.5)
  * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
  * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -52,7 +197,12 @@ This package provides various compression algorithms.
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors (see the sketch below).
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+
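On the server side the `gzhttp` wrapper mentioned above is a one-line change. A minimal editor-added sketch (not part of the diff), assuming only the documented `gzhttp.GzipHandler` API; the handler body and listen address are illustrative:

```go
package main

import (
	"io"
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	plain := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello, compressed world")
	})

	// GzipHandler compresses responses for clients that send
	// Accept-Encoding: gzip and passes others through unchanged.
	http.Handle("/", gzhttp.GzipHandler(plain))

	// Illustrative listen address.
	_ = http.ListenAndServe(":8080", nil)
}
```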
+ +
+ See changes to v1.12.x
+
* May 25, 2021 (v1.12.3)
  * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
  * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -74,9 +224,10 @@ This package provides various compression algorithms.
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
- See changes prior to v1.12.1
+ See changes to v1.11.x
+
* Mar 26, 2021 (v1.11.13)
  * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -135,7 +286,7 @@ This package provides various compression algorithms.
- See changes prior to v1.11.0 + See changes to v1.10.x * July 8, 2020 (v1.10.11) * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) @@ -297,11 +448,6 @@ This package provides various compression algorithms. # deflate usage -* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). -* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). -* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). -* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) - The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: | old import | new import | Documentation @@ -323,6 +469,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using the stateless compress described below. +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f34191..dac97e5 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. 
- for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e8..e36d974 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. 
-func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -172,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -193,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -213,10 +106,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -277,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -298,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -313,15 +211,17 @@ func (b *bitReaderShifted) fill() { } } -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. 
b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 6bce4e8..ec71f7a 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -5,8 +5,6 @@ package huff0 -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF} /* up to 16 bits */ -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { @@ -201,10 +93,3 @@ func (b *bitWriter) close() error { b.flushAlign() return nil } - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go index 50bcdf6..4dcab8d 100644 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) { b.off = 0 } -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - // Int32 returns a little endian int32 starting at current offset. func (b byteReader) Int32() int32 { v3 := int32(b.b[b.off+3]) @@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 { return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 } -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - // remain will return the number of bytes remaining. 
func (b byteReader) remain() int { return len(b.b) - b.off diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 8323dc0..cdc9485 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -356,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { m := uint32(0) if len(s.prevTable) > 0 { for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false } } return int(m), reuse } for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + s.symbolLen = uint16(i) + 1 } return int(m), false } @@ -395,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool { return true } +//lint:ignore U1000 used for debugging func (s *Scratch) validateTable(c cTable) bool { if len(c) < int(s.symbolLen) { return false @@ -474,34 +484,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. 
huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -509,18 +520,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -532,7 +544,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -547,7 +559,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -593,12 +605,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -607,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -617,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - 
huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -645,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -665,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -682,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -696,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -712,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 2a06bd1..42a237e 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,13 +4,13 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) type dTable struct { single []dEntrySingle - double []dEntryDouble } // single-symbols decoding @@ -18,13 +18,6 @@ type dEntrySingle struct { entry uint16 } -// double-symbols decoding -type dEntryDouble struct { - seq [4]byte - nBits uint8 - len uint8 -} - // Uses special code for all tables that are < 8 bits. const use8BitTables = true @@ -34,7 +27,7 @@ const use8BitTables = true // If no Scratch is provided a new one is allocated. // The returned Scratch can be used for encoding or decoding input using this table. func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) + s, err = s.prepare(nil) if err != nil { return s, nil, err } @@ -216,6 +209,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,103 +217,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool } -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf } - return dst, br.close() + return &[4][256]byte{} } // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. @@ -341,7 +247,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 switch d.actualTableLog { @@ -369,6 +276,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -398,6 +306,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) 
@@ -426,6 +335,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +365,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -484,6 +395,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -513,6 +425,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -542,6 +455,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -571,6 +485,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -578,10 +493,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } } default: + d.bufs.Put(bufs) return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -601,6 +518,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -609,6 +527,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -628,7 +547,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 const shift = 56 @@ -655,6 +575,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -663,6 +584,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -679,6 +601,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -688,195 +611,10 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. 
- if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -916,12 +654,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. 
- const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -942,8 +680,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -951,8 +689,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -960,8 +698,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -969,8 +707,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { @@ -987,8 +725,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -996,8 +734,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1005,8 +743,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1014,49 +752,61 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out 
= out[bufoff:] - decoded += 256 // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 } } if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1076,7 +826,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1084,16 +835,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -1131,16 +888,15 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { const shift = 56 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -1150,106 +906,116 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. 
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 
8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 } } if off > 0 { @@ -1257,21 +1023,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. 
+	remainBytes := dstEvery - (decoded / 4)
 	for i := range br {
 		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
 		br := &br[i]
-		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		bitsLeft := br.remaining()
 		for bitsLeft > 0 {
 			if br.finished() {
+				d.bufs.Put(buf)
 				return nil, io.ErrUnexpectedEOF
 			}
 			if br.bitsRead >= 56 {
@@ -1291,7 +1063,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 				}
 			}
 			// end inline...
-			if offset >= len(out) {
+			if offset >= endsAt {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 4")
 			}
@@ -1299,16 +1072,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 			v := single[br.peekByteFast()].entry
 			nBits := uint8(v)
 			br.advance(nBits)
-			bitsLeft -= int(nBits)
+			bitsLeft -= uint(nBits)
 			out[offset] = uint8(v >> 8)
 			offset++
 		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
+			d.bufs.Put(buf)
 			return nil, err
 		}
 	}
+	d.bufs.Put(buf)
 	if dstSize != decoded {
 		return nil, errors.New("corruption detected: short output block")
 	}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 0000000..ba7e8e6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/klauspost/compress/internal/cpuinfo"
+)
+
+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//
+//go:noescape
+func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+
+// decompress4x_8b_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+//
+//go:noescape
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+
+// fallback8BitSize is the size where using Go version is faster.
+const fallback8BitSize = 800
+
+type decompress4xContext struct {
+	pbr      *[4]bitReaderShifted
+	peekBits uint8
+	out      *byte
+	dstEvery int
+	tbl      *dEntrySingle
+	decoded  int
+	limit    *byte
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 0000000..c4c7ab2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. + +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := 
table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += 
(br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + 
+ // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + 
+bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 0000000..908c17d --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. 
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. 
+ if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
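The `*(*[bufoff]byte)(out)` stores in the 4X flush above, kept next to their commented-out `copy` equivalents, rely on Go 1.17's slice-to-array-pointer conversion: the length check happens once at the conversion (panicking if the slice is too short), then a fixed-size copy is emitted. A minimal sketch of the conversion with an illustrative size:

```go
package main

import "fmt"

func main() {
	src := [4]byte{0xde, 0xad, 0xbe, 0xef}
	dst := make([]byte, 8)

	// Slice-to-array-pointer conversion (Go 1.17+): one bounds check,
	// then exactly 4 bytes are copied.
	*(*[4]byte)(dst) = src

	fmt.Printf("% x\n", dst) // de ad be ef 00 00 00 00
}
```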
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 3ee00ec..e8ad17a 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/bits" + "sync" "github.com/klauspost/compress/fse" ) @@ -116,6 +117,7 @@ type Scratch struct { nodes []nodeElt tmpOut [4][]byte fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. huffWeight [maxSymbolValue + 1]byte } diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 0000000..3954c51 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 0000000..e802579 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 0000000..4465fbe --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. 
determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 511bba6..298c4f8 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 { // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= len(lit) && len(lit) <= 65536 func emitLiteral(dst, lit []byte) int { @@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int { // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= 65535 // 4 <= length && length <= 65535 @@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int { // src[i:i+k-j] and src[j:k] have the same contents. // // It assumes that: +// // 0 <= i && i < j && j <= len(src) func extendMatch(src []byte, i, j int) int { for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { @@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. // The table element type is uint16, as s < sLimit and sLimit < len(src) diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index c8f0f16..65b38ab 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + ## Installation Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. @@ -78,6 +80,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is in the future. So if you want to limit concurrency for future updates, specify the concurrency you would like. +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined compression settings can be specified. 
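To tie those two options together, here is a small sketch using only the public `zstd` API referenced above: a synchronous stream encoder with a pre-defined level, where `WithEncoderConcurrency(1)` compresses each block on the calling goroutine before the next is started. Error handling is illustrative only.

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Synchronous encoding: no async goroutines, blocks are compressed
	// and written as they complete.
	enc, err := zstd.NewWriter(os.Stdout,
		zstd.WithEncoderConcurrency(1),
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression), // pre-defined level
	)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		enc.Close()
		panic(err)
	}
	if err := enc.Close(); err != nil { // flushes and finishes the frame
		panic(err)
	}
}
```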
@@ -104,7 +109,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
 For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
 
 `EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will run on the same goroutine as the caller.
 Encoded blocks can be concatenated and the result will be the combined input stream.
 Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
@@ -149,10 +155,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
 This package:
 file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73101992 643 313.87
-silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 8825 22.90
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
 
 cgo zstd:
 silesia.tar zstd 1 211947520 73605392 543 371.56
 silesia.tar zstd 6 211947520 62916450 1913 105.66
 silesia.tar zstd 9 211947520 60212393 5063 39.92
 
 gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80136201 1152 175.45
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
 
 GOB stream of binary data. Highly compressible.
 https://files.klauspost.com/compress/gob-stream.7z
 
 file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 235022249 3088 590.30
-gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 165609838 50369 36.19
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
 
 gob-stream zstd 1 1911399616 249810424 2637 691.26
 gob-stream zstd 3 1911399616 208192146 3490 522.31
 gob-stream zstd 6 1911399616 193632038 6687 272.56
 gob-stream zstd 9 1911399616 177620386 16175 112.70
-gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 359753026 5438 335.20
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
 
 The test data for the Large Text Compression Benchmark is the first
 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 292243069 12162 78.41 -enwik9 zskp 4 1000000000 262183768 82837 11.51 +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 9 1000000000 278348700 28549 33.40 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 Highly compressible JSON file. https://files.klauspost.com/compress/github-june-2days-2019.json.zst file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 -github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 -rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 
3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
 
 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
 ```
 
 ## Decompressor
 
@@ -283,8 +289,13 @@ func Decompress(in io.Reader, out io.Writer) error {
 }
 ```
 
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+When running with default settings, it is important to use the "Close" function when you no longer need the Reader,
+so that its goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
 
 For decoding buffers, it could look something like this:
 
@@ -293,7 +304,7 @@ import "github.com/klauspost/compress/zstd"
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
@@ -303,9 +314,12 @@ func Decompress(src []byte) ([]byte, error) {
 ```
 
 Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
 It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
 
 ### Dictionaries
 
@@ -357,62 +371,48 @@ In this case no unneeded allocations should be made.
 The buffer decoder does everything on the same goroutine and does nothing concurrently.
 It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
 
-The stream decoder operates on
+The stream decoder will create goroutines that:
 
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Read input and split the input into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
 
 So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
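A sketch of the synchronous alternative mentioned above, where `WithDecoderConcurrency(1)` keeps decoding on the calling goroutine; standard usage of the public API, with illustrative error handling:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Synchronous stream decoding: data is decompressed only as Read
	// (here driven by io.Copy) asks for it, without read-ahead goroutines.
	dec, err := zstd.NewReader(os.Stdin, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close() // releases decoder resources

	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}
```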
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
 Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
-
-
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
 ### Benchmarks
 
-These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
-The first two are streaming decodes and the last are smaller inputs.
-
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
 ```
-BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
-BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
-
-BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
-BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
-
-Concurrent performance:
-
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
-
-BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. ## Zstd inside ZIP files diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 753d17d..97299d4 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -62,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 { return v } -func (b *bitReader) get16BitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - // fillFast() will make sure at least 32 bits are available. // There must be at least 4 bytes available. func (b *bitReader) fillFast() { @@ -132,6 +126,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index b366182..78b3c61 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -5,8 +5,6 @@ package zstd -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { b.nBits += bits } -// flush will flush all pending full bytes. 
-// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index dc587b2..2445bb4 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,13 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,14 +42,14 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff // We support slightly less than the reference decoder to be able to // use ints on 32 bit archs. @@ -76,19 +80,28 @@ type blockDec struct { // Window size of the block. WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - err error - decWG sync.WaitGroup + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. 
RLESize uint32 - tmp [4]byte Type blockType @@ -108,13 +121,8 @@ func (b *blockDec) String() string { func newBlockDec(lowMem bool) *blockDec { b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), + lowMem: lowMem, } - b.decWG.Add(1) - go b.startDecoder() return &b } @@ -132,11 +140,17 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.Type = blockType((bh >> 1) & 3) // find size. cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } b.RLESize = uint32(cSize) if b.lowMem { maxSize = cSize @@ -147,9 +161,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { if debugDecoder { @@ -157,7 +171,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return ErrCompressedSizeTooBig } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = 0 // We do not need a destination for raw blocks. maxSize = -1 @@ -166,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize) + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -184,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } @@ -192,85 +219,14 @@ func (b *blockDec) sendErr(err error) { b.Last = true b.Type = blockTypeReserved b.err = err - b.input <- struct{}{} } // Close will release resources. // Closed blockDec cannot be reset. func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() -} - -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. 
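// Editor's note on the removal that follows: the per-block startDecoder
// goroutine and its input/history/result channels are gone in this update.
// Decoding is now staged explicitly — decodeLiterals, prepareSequences,
// decodeSequences and executeSequences further below — with the stream
// decoder driving the stages itself.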
-func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debugDecoder { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debugDecoder { - println("blockDec: Finished block") - } - } } -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -278,7 +234,7 @@ func (b *blockDec) decodeBuf(hist *history) error { if b.lowMem { b.dst = make([]byte, b.RLESize) } else { - b.dst = make([]byte, maxBlockSize) + b.dst = make([]byte, maxCompressedBlockSize) } } b.dst = b.dst[:b.RLESize] @@ -293,14 +249,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -310,30 +275,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -349,7 +302,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. 
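// Editor's worked example for the 3-byte header parsed below: a first byte of
// 0x5C encodes litType=0 (raw) in bits 0-1 and sizeFormat=3 in bits 2-3; with
// in[1]=0x12 and in[2]=0x03, Regenerated_Size = 0x5 + 0x12<<4 + 0x03<<12 =
// 12581 bytes.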
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -360,7 +313,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -371,7 +324,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -381,7 +334,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -392,13 +345,15 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -406,19 +361,13 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - - } + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) } } literals = b.literalBuf[:litRegenSize] @@ -433,7 +382,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. 
literals = in[:litCompSize] @@ -441,31 +390,65 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. + huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -474,27 +457,63 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
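// (Zero sequences: per the format, the block body is just the literals
// appended above.)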
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -511,8 +530,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } - var seqs = &sequenceDecs{} + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -541,6 +568,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -548,34 +578,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec + seq.fse.setRLE(symb) if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } if debugDecoder { - println("Read table ok", "symbolLen:", dec.symbolLen) + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: seq.repeat = true } @@ -585,140 +617,106 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. 
- - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } if debugDecoder { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } return nil } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) return err } - if debugDecoder { - println("History merged ok") + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) } - br := &bitReader{} - if err := br.init(in); err != nil { - return err + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. 
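// Editor's note on the added lines below: decodeSequences only materializes
// the seqVals; copying into the output buffer happens later in
// executeSequences, which is what lets the async path run the two stages on
// separate goroutines.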
+ err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debugDecoder { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index aab71c6..176788f 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -7,7 +7,6 @@ package zstd import ( "fmt" "io" - "io/ioutil" ) type byteBuffer interface { @@ -23,7 +22,7 @@ type byteBuffer interface { readByte() (byte, error) // Skip n bytes. 
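// Editor's note on the signature change just below: skipN now takes int64, so
// frame skips larger than 2 GiB cannot overflow int on 32-bit platforms, and
// negative skips are rejected explicitly instead of slicing out of range.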
- skipN(n int) error + skipN(n int64) error } // in-memory buffer @@ -52,10 +51,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { return r, nil } -func (b *byteBuf) remain() []byte { - return *b -} - func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { @@ -66,9 +61,12 @@ func (b *byteBuf) readByte() (byte, error) { return r, nil } -func (b *byteBuf) skipN(n int) error { +func (b *byteBuf) skipN(n int64) error { bb := *b - if len(bb) < n { + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { return io.ErrUnexpectedEOF } *b = bb[n:] @@ -113,6 +111,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { @@ -121,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) { return r.tmp[0], nil } -func (r *readerWrapper) skipN(n int) error { - n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) - if n2 != int64(n) { +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { err = io.ErrUnexpectedEOF } return err diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go index 2c4fca1..0e59a24 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -13,12 +13,6 @@ type byteReader struct { off int } -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - // advance the stream b n bytes. func (b *byteReader) advance(n uint) { b.off += int(n) diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 5022e71..f6a2409 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -4,7 +4,6 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "io" @@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error { } h.HeaderSize += 4 b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { return ErrMagicMismatch } if len(in) < 4 { @@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:size], in[size:] h.HeaderSize += int(size) - switch size { + switch len(b) { case 1: h.DictionaryID = uint32(b[0]) case 2: @@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) - switch fcsSize { + switch len(b) { case 1: h.FrameContentSize = uint64(b[0]) case 2: diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f430f58..7113e69 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,9 +5,12 @@ package zstd import ( - "errors" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -22,15 +25,22 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. 
- stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -46,7 +56,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -81,7 +94,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true if r == nil { @@ -89,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. - d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -130,7 +143,7 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } @@ -162,6 +175,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.drainOutput() + d.syncStream.br.r = nil if r == nil { d.current.err = ErrDecoderNilInput if len(d.current.b) > 0 { @@ -172,21 +186,23 @@ func (d *Decoder) Reset(r io.Reader) error { } // If bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { bb2 := bb if debugDecoder { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } b := bb2.Bytes() var dst []byte - if cap(d.current.b) > 0 { - dst = d.current.b + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] } - dst, err := d.DecodeAll(b, dst[:0]) + dst, err := d.DecodeAll(b, dst) if err == nil { err = io.EOF } + // Save output buffer + d.syncStream.dstBuf = dst d.current.b = dst d.current.err = err d.current.flushed = true @@ -195,33 +211,40 @@ func (d *Decoder) Reset(r io.Reader) error { } return nil } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil + d.syncStream.dstBuf = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. 
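// Editor's note: with errEndOfStream removed above, drainOutput below simply
// drains whatever is pending on the output channel, returns block decoders to
// the pool, then drops the channel and marks the stream flushed.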
func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { @@ -243,12 +266,9 @@ func (d *Decoder) drainOutput() { } d.decoders <- v.d } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. @@ -287,19 +307,23 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } // Grab a block decoder and frame decoder. block := <-d.decoders frame := block.localFrame + initialSize := len(dst) defer func() { if debugDecoder { printf("re-adding decoder: %p", block) } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -307,34 +331,45 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - frame.history.setDict(&dict) - } if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } return dst, err } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } - if cap(dst) == 0 { + + if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. 
size := len(input) * 2 @@ -352,6 +387,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if err != nil { return dst, err } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } if len(frame.bBuf) == 0 { if debugDecoder { println("frame dbuf empty") @@ -368,33 +406,172 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. - return blocking + return false } + d.current.b = d.current.b[:0] + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } + if !ok { + // This should not happen, so signal error state... + d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } if debugDecoder { - println("got", len(d.current.b), "bytes, error:", d.current.err) + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += 
uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last } return true } +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + // Close will release all resources. // It is NOT possible to reuse the decoder after this. func (d *Decoder) Close() { @@ -402,10 +579,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -456,100 +633,321 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debugDecoder { - println("got new stream") + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... 
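// Editor's note, stage 1 of the three-stage pipeline described above: this
// goroutine owns the sequence-decoding history. When a block carries
// async.newHist it adopts the fresh history (and dictionary, if any), then
// runs prepareSequences/decodeSequences on the literal and sequence data the
// reader stage produced, and forwards the block to the execution stage via
// seqExecute. Errors latch via hasErr so later blocks pass straight through.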
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... + frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && 
hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true } else { - frame.history.setDict(&dict) + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block } - break + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} - } - break decodeStream - default: + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. 
+ } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break } - // All blocks have started decoding, check if there are more frames. - println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 95cc9b8..07a90dd 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -14,21 +16,28 @@ type DOption func(*decoderOptions) error // options retains accumulated state of multiple options. type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []dict + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int } func (o *decoderOptions) setDefault() { *o = decoderOptions{ // use less ram: true for now, but may change. 
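// Editor's note on the changed defaults below: decoder concurrency is now
// capped at four goroutines, inputs under 128 KiB take the in-memory
// DecodeAll fast path, and the default memory limit drops from the
// effectively unlimited 1<<63 to 64 GiB.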
- lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, } - o.maxDecodedSize = 1 << 63 + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -37,16 +46,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { + if n < 0 { return errors.New("concurrency must be at least 1") } - o.concurrent = n + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } return nil } } @@ -54,7 +72,7 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { @@ -69,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { for _, b := range dicts { @@ -77,8 +101,20 @@ func WithDecoderDicts(dicts ...[]byte) DOption { if err != nil { return err } - o.dicts = append(o.dicts, *d) + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) return nil } } @@ -100,3 +136,34 @@ func WithDecoderMaxWindow(size uint64) DOption { return nil } } + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. 
+// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. +func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index a36ae83..66a95c1 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -1,7 +1,6 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -20,7 +19,10 @@ type dict struct { content []byte } -var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 // ID returns the dictionary id or 0 if d is nil. func (d *dict) ID() uint32 { @@ -50,7 +52,7 @@ func loadDict(b []byte) (*dict, error) { ofDec: sequenceDec{fse: &fseDecoder{}}, mlDec: sequenceDec{fse: &fseDecoder{}}, } - if !bytes.Equal(b[:4], dictMagic[:]) { + if string(b[:4]) != dictMagic { return nil, ErrMagicMismatch } d.id = binary.LittleEndian.Uint32(b[4:8]) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 15ae8ee..bfb2e14 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -16,6 +16,7 @@ type fastBase struct { cur int32 // maximum offset. Should be at least 2x block size. maxMatchOff int32 + bufferReset int32 hist []byte crc *xxhash.Digest tmp [8]byte @@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc { } func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) } // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { @@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) } } - a := src[s:] - b := src[t:] - b = b[:len(a)] - end := int32((len(a) >> 3) << 3) - for i := int32(0); i < end; i += 8 { - if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { - return i + int32(bits.TrailingZeros64(diff)>>3) - } - } - - a = a[end:] - b = b[end:] - for i := range a { - if a[i] != b[i] { - return int32(i) + end - } - } - return int32(len(a)) + end + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. @@ -171,7 +155,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { // We offset current position so everything will be out of reach. 
// If above reset line, history will be purged. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += e.maxMatchOff + int32(len(e.hist)) } e.hist = e.hist[:0] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 96028ec..830f5ba 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -32,6 +32,7 @@ type match struct { length int32 rep int32 est int32 + _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes } const highScore = 25000 @@ -84,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -192,8 +189,8 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b match) match { - if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + bestOf := func(a, b *match) *match { + if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { return a } return b @@ -219,22 +216,26 @@ encodeLoop: return m } - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) + best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) if canRepeat && best.length < goodEnough { cv32 := uint32(cv >> 8) spp := s + 1 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) if best.length > 0 { cv32 = uint32(cv >> 24) spp += 2 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) } } // Load next and check... 
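// Editor's sketch (not part of the patch): the enc_best.go hunks above change
// bestOf to take *match instead of match values, so candidate comparison no
// longer copies 32-byte structs, and pad match to exactly 32 bytes. A minimal
// standalone illustration of the same pattern; bitsPerByte = 8 is an
// assumption of this sketch, as are the example values.
package main

import "fmt"

type match struct {
	offset int32
	s      int32
	length int32
	rep    int32
	est    int32
	_      [12]byte // 5*4 bytes + 12 bytes padding = 32 bytes, cache-line friendly
}

const bitsPerByte = 8 // assumed; charges for extra literal bytes

// bestOf prefers the candidate with the lower estimated cost after charging
// for the literals between the two start positions, mirroring the rewritten
// pointer-based comparison above.
func bestOf(a, b *match) *match {
	if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
		return a
	}
	return b
}

func main() {
	m1 := match{s: 100, length: 8, est: 120}
	m2 := match{s: 104, length: 16, est: 110}
	fmt.Printf("winner starts at s=%d\n", bestOf(&m1, &m2).s)
}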
@@ -261,26 +262,33 @@ encodeLoop: candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) // Long at s+1, s+2 - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) + m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) + best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) if false { // Short at s+3. // Too often worse... - best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) + best = bestOf(best, &m) } // See if we can find a better match by checking where the current best ends. // Use that offset to see if we can find a better full match. if sAt := best.s + best.length; sAt < sLimit { nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) candidateEnd := e.longTable[nextHashL] - if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { - bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) - if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { - bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + const skipBeginning = 2 + if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd := bestOf(best, &m) + if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd = bestOf(bestEnd, &m) } best = bestEnd } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 602c05e..8582f31 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -156,8 +152,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -416,15 +412,23 @@ encodeLoop: // Try to find a better match by searching for a long match at the end of the current best match if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) + s2 := s + skipBeginning + cv := load3232(src, s2) candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 if matchedNext > matched { t = coffsetL + s = s2 matched = matchedNext if debugMatches { println("long match at end-of-match") @@ -434,12 +438,13 @@ encodeLoop: // Check prev long... if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 if matchedNext > matched { t = coffsetL + s = s2 matched = matchedNext if debugMatches { println("prev long match at end-of-match") @@ -518,8 +523,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -578,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -674,8 +679,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -1047,8 +1052,8 @@ encodeLoop: } // Store this, since we have it. 
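Both skipBeginning constants above (2 for the best level, 3 for better) implement the same trick: when probing for a longer match near the end of the current one, tolerate a few mismatching bytes at the candidate's start, because backward extension reclaims those bytes whenever they do agree. A toy illustration; matchLen here is a simplified stand-in, not the encoder's implementation:

```go
package main

import "fmt"

// matchLen counts equal leading bytes (simplified stand-in).
func matchLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	//                 t=0                s=14
	src := []byte("XYcdefghij####abcdefghij")
	s, t := 14, 0
	const skipBeginning = 2 // tolerate mismatches in the first bytes

	// Probe past the possibly-mismatching prefix...
	n := matchLen(src[s+skipBeginning:], src[t+skipBeginning:])
	// ...then extend backwards while bytes still agree, reclaiming skipped bytes.
	start := skipBeginning
	for start > 0 && src[s+start-1] == src[t+start-1] {
		start--
	}
	// A direct comparison at s would have failed immediately ('a' != 'X').
	fmt.Printf("match of %d bytes at s=%d against t=%d\n", n+skipBeginning-start, s+start, t+start)
}
```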
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d6b3104..7d42510 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} e.cur = e.maxMatchOff break } @@ -127,8 +123,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -439,8 +435,8 @@ encodeLoop: var t int32 for { - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -685,7 +681,7 @@ encodeLoop: } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -785,8 +781,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -969,7 +965,7 @@ encodeLoop: te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) e.longTable[longHash1] = te0 e.longTable[longHash2] = te1 e.markLongShardDirty(longHash1) @@ -1002,8 +998,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match @@ -1103,7 +1099,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - copy(e.longTable[:], e.dictLongTable) + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) for i := range e.longTableShardDirty { e.longTableShardDirty[i] = false } @@ -1114,7 +1111,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { continue } - copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + e.longTableShardDirty[i] = false } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 5f08a28..315b1a8 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 7 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -304,13 +304,13 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { minNonLiteralBlockSize = 1 + 1 + inputMargin ) if debugEncoder { - if len(src) > maxBlockSize { + if len(src) > maxCompressedBlockSize { panic("src too big") } } // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -538,7 +538,7 @@ encodeLoop: println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { return } // Protect against e.cur wraparound. 
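The Reset methods above (and their enc_fast counterparts below) replace copy calls with Go 1.17 slice-to-array-pointer conversions, which compile to a single bulk move and turn a silently short copy into a loud length panic. A minimal demonstration, with an assumed toy tableEntry:

```go
package main

import "fmt"

type tableEntry struct { // stand-in mirroring the encoder's table entries
	val    uint32
	offset int32
}

const shardSize = 4

func main() {
	src := make([]tableEntry, shardSize)
	src[0] = tableEntry{val: 42, offset: 1}

	// Equivalent to copy(dst[:], src[:shardSize]), but requires Go 1.17+
	// and panics if len(src) < shardSize instead of copying fewer elements.
	dst := *(*[shardSize]tableEntry)(src)
	fmt.Println(dst[0]) // {42 1}
}
```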
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
+			e.table = [tableSize]tableEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -871,7 +869,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 	const shardCnt = tableShardCnt
 	const shardSize = tableShardSize
 	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
-		copy(e.table[:], e.dictTable)
+		//copy(e.table[:], e.dictTable)
+		e.table = *(*[tableSize]tableEntry)(e.dictTable)
 		for i := range e.tableShardDirty {
 			e.tableShardDirty[i] = false
 		}
@@ -883,7 +882,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 			continue
 		}
 
-		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
 		e.tableShardDirty[i] = false
 	}
 	e.allDirty = false
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index e6e3159..65c6c36 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -8,6 +8,7 @@ import (
 	"crypto/rand"
 	"fmt"
 	"io"
+	"math"
 	rdebug "runtime/debug"
 	"sync"
 
@@ -98,23 +99,25 @@ func (e *Encoder) Reset(w io.Writer) {
 	if cap(s.filling) == 0 {
 		s.filling = make([]byte, 0, e.o.blockSize)
 	}
-	if cap(s.current) == 0 {
-		s.current = make([]byte, 0, e.o.blockSize)
-	}
-	if cap(s.previous) == 0 {
-		s.previous = make([]byte, 0, e.o.blockSize)
+	if e.o.concurrent > 1 {
+		if cap(s.current) == 0 {
+			s.current = make([]byte, 0, e.o.blockSize)
+		}
+		if cap(s.previous) == 0 {
+			s.previous = make([]byte, 0, e.o.blockSize)
+		}
+		s.current = s.current[:0]
+		s.previous = s.previous[:0]
+		if s.writing == nil {
+			s.writing = &blockEnc{lowMem: e.o.lowMem}
+			s.writing.init()
+		}
+		s.writing.initNewEncode()
 	}
 	if s.encoder == nil {
 		s.encoder = e.o.encoder()
 	}
-	if s.writing == nil {
-		s.writing = &blockEnc{lowMem: e.o.lowMem}
-		s.writing.init()
-	}
-	s.writing.initNewEncode()
 	s.filling = s.filling[:0]
-	s.current = s.current[:0]
-	s.previous = s.previous[:0]
 	s.encoder.Reset(e.o.dict, false)
 	s.headerWritten = false
 	s.eofWritten = false
@@ -258,6 +261,46 @@ func (e *Encoder) nextBlock(final bool) error {
 		return s.err
 	}
 
+	// SYNC:
+	if e.o.concurrent == 1 {
+		src := s.filling
+		s.nInput += int64(len(s.filling))
+		if debugEncoder {
+			println("Adding sync block,", len(src), "bytes, final:", final)
+		}
+		enc := s.encoder
+		blk := enc.Block()
+		blk.reset(nil)
+		enc.Encode(blk, src)
+		blk.last = final
+		if final {
+			s.eofWritten = true
+		}
+
+		err := errIncompressible
+		// If we got the exact same number of literals as input,
+		// assume the literals cannot be compressed.
+		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		}
+		switch err {
+		case errIncompressible:
+			if debugEncoder {
+				println("Storing incompressible block as raw")
+			}
+			blk.encodeRaw(src)
+			// In fast mode, we do not transfer offsets, so we don't have to deal with changing the recent offsets.
+		case nil:
+		default:
+			s.err = err
+			return err
+		}
+		_, s.err = s.w.Write(blk.output)
+		s.nWritten += int64(len(blk.output))
+		s.filling = s.filling[:0]
+		return s.err
+	}
+
 	// Move blocks forward.
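The synchronous path above (taken when WithEncoderConcurrency(1) is set) decides between entropy coding and storing the block raw, and the pre-check is cheap: if matching produced no sequences at all (every input byte survived as a literal) and the block is completely full, encoding is not even attempted. A stand-in sketch of just that decision; note that blk.encode can still report the block incompressible after trying:

```go
package main

import "fmt"

// shouldSkipEncode mirrors the pre-check above with plain values.
func shouldSkipEncode(srcLen, literals, blockSize int) bool {
	// No matches found and a full block: assume incompressible and
	// emit a raw block (3-byte header + payload) immediately.
	return srcLen == literals && srcLen == blockSize
}

func main() {
	fmt.Println(shouldSkipEncode(128<<10, 128<<10, 128<<10)) // true: store raw
	fmt.Println(shouldSkipEncode(128<<10, 100<<10, 128<<10)) // false: try entropy coding
}
```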
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) @@ -486,8 +529,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If a non-single block is needed the encoder will reset again. e.encoders <- enc }() - // Use single segments when above minimum window and below 1MB. - single := len(src) < 1<<20 && len(src) > MinWindowSize + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { single = *e.o.single } @@ -509,7 +552,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { + if len(src) <= e.o.blockSize { enc.Reset(e.o.dict, true) // Slightly faster with no history and everything in one block. if e.o.crc { @@ -597,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 5f2e1d0..8e15be2 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -3,6 +3,8 @@ package zstd import ( "errors" "fmt" + "math" + "math/bits" "runtime" "strings" ) @@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder { switch o.level { case SpeedFastest: if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedDefault: if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} 
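The MaxEncodedSize bound above can be checked by hand. With assumed parameters of a 1 MiB input, a 128 KiB block size (the zstd format maximum), CRC enabled, no dictionary, and no padding:

```go
package main

import "fmt"

func main() {
	const (
		size      = 1 << 20 // input length
		blockSize = 1 << 17 // assumed e.o.blockSize: 128 KiB
	)
	frameHeader := 4 + 2 // magic + frame header & window descriptor
	frameHeader += 4     // frame content size: 65792 <= size < 2^31 takes 4 bytes
	frameHeader += 4     // trailing checksum: low 4 bytes of the XXH64 digest

	blocks := (size + blockSize) / blockSize // 9 blocks, rounded up, never 0
	maxSz := frameHeader + 3*blocks + size   // 3 bytes of header per block

	fmt.Println(maxSz) // 1048617: 41 bytes of worst-case overhead on 1 MiB
}
```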
case SpeedBetterCompression: if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} } panic("unknown compression level") } @@ -76,6 +78,7 @@ func WithEncoderCRC(b bool) EOption { // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { @@ -282,7 +285,7 @@ func WithNoEntropyCompression(b bool) EOption { // a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. // For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. // This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. // This setting has no effect on streamed encodes. func WithSingleSegment(b bool) EOption { return func(o *encoderOptions) error { @@ -303,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption { } // WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// // The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { return func(o *encoderOptions) error { d, err := loadDict(dict) @@ -314,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption { return nil } } + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. 
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 989c79f..d8e8a05 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -5,26 +5,20 @@ package zstd import ( - "bytes" + "encoding/binary" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -34,15 +28,10 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool } const ( @@ -54,9 +43,9 @@ const ( MaxWindowSize = 1 << 29 ) -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" ) func newFrameDec(o decoderOptions) *frameDec { @@ -100,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error { copy(signature[1:], b) } - if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) } // Break if not skippable frame. break @@ -117,7 +106,7 @@ func (d *frameDec) reset(br byteBuffer) error { } n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) println("Skipping frame with", n, "bytes.") - err = br.skipN(int(n)) + err = br.skipN(int64(n)) if err != nil { if debugDecoder { println("Reading discarded frame", err) @@ -125,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error { return err } } - if !bytes.Equal(signature[:], frameMagic) { + if string(signature[:]) != frameMagic { if debugDecoder { - println("Got magic numbers: ", signature, "want:", frameMagic) + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) } return ErrMagicMismatch } @@ -166,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -178,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error { return err } var id uint32 - switch size { + switch len(b) { case 1: id = uint32(b[0]) case 2: @@ -189,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". 
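WithEncoderDictRaw above registers plain bytes as initial history rather than a structured, trained dictionary. A usage sketch; the id and content are illustrative, and decoding frames that reference the ID requires handing the decoder the same raw dictionary via the matching decoder-side option in this release:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Arbitrary shared history; no "zstd --train" format required.
	history := []byte("common prefix shared by many small payloads ")

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(1234, history))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	payload := []byte("common prefix shared by many small payloads and a tail")
	fmt.Println(len(enc.EncodeAll(payload, nil)))
}
```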
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -208,14 +193,14 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { b, err := br.readSmall(fcsSize) if err != nil { println("Reading Frame content", err) return err } - switch fcsSize { + switch len(b) { case 1: d.FrameContentSize = uint64(b[0]) case 2: @@ -229,9 +214,10 @@ func (d *frameDec) reset(br byteBuffer) error { d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } if debugDecoder { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -241,20 +227,27 @@ func (d *frameDec) reset(br byteBuffer) error { d.crc.Reset() } + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. d.WindowSize = d.FrameContentSize if d.WindowSize < MinWindowSize { d.WindowSize = MinWindowSize } - } - - if d.WindowSize > uint64(d.o.maxWindowSize) { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded } - return ErrWindowSizeExceeded } + // The minimum Window_Size is 1 KB. if d.WindowSize < MinWindowSize { if debugDecoder { @@ -263,11 +256,23 @@ func (d *frameDec) reset(br byteBuffer) error { return ErrWindowSizeTooSmall } d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -276,209 +281,97 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { if debugDecoder { - printf("decoding new block %p:%p", block, block.data) + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. 
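For reference, the Frame_Content_Size field decoded above comes in 1, 2, 4, or 8-byte little-endian forms, and the 2-byte form is biased by 256 so it does not waste codes on values the 1-byte form already covers. A worked instance:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 1-byte field: the value is used as-is (0..255).
	fmt.Println(uint64([]byte{200}[0])) // 200

	// 2-byte field: stored as size-256, so 0x0000 decodes to 256.
	b := []byte{0x34, 0x12}
	fmt.Println(uint64(binary.LittleEndian.Uint16(b)) + 256) // 4660+256 = 4916
}
```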
- d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debugDecoder { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) // We can overwrite upper tmp now - want, err := d.rawInput.readSmall(4) + buf, err := d.rawInput.readSmall(4) if err != nil { println("CRC missing?", err) return err } - if !bytes.Equal(tmp[:], want) { + if d.o.ignoreChecksum { + return nil + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { if debugDecoder { - println("CRC Check Failed:", tmp[:], "!=", want) + printf("CRC check failed: got %08x, want %08x\n", got, want) } return ErrCRCMismatch } if debugDecoder { - println("CRC ok", tmp[:]) + printf("CRC ok %08x\n", got) } return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 2MB. - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debugDecoder { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. 
- block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debugDecoder { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debugDecoder { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debugDecoder { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err } - block = next } + + return nil } -// runDecoder will create a sync decoder that will decode a block of data. +// runDecoder will run the decoder for the remainder of the frame. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. 
crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) @@ -489,29 +382,47 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded break } - if uint64(len(d.history.b)) > d.o.maxDecodedSize { + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index bb3d4fd..2f8860a 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<> 3) - // println(s.norm[:s.symbolLen], s.symbolLen) return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. 
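The maxSyncLen computation at the start of runDecoder above pre-sizes the synchronous output: prefer the frame's declared content size, clamp to the remaining capacity of dst when WithDecodeAllCapLimit is in effect, and reject oversized frames before decoding a single block. A stand-in sketch with plain values in place of decoder state:

```go
package main

import (
	"errors"
	"fmt"
)

var errDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")

// maxSyncLen mirrors the decision above outside the decoder.
func maxSyncLen(fcs uint64, fcsKnown bool, dstLen, dstCap int, limitToCap bool, maxDecodedSize uint64) (uint64, error) {
	var max uint64
	if limitToCap {
		max = uint64(dstCap - dstLen) // never grow past the caller's capacity
	}
	if fcsKnown {
		if !limitToCap || fcs+uint64(dstLen) < max {
			max = fcs + uint64(dstLen) // trust the frame's declared size
		}
		if max > maxDecodedSize {
			return 0, errDecoderSizeExceeded // reject before decoding anything
		}
	}
	return max, nil
}

func main() {
	n, err := maxSyncLen(1<<20, true, 0, 1<<22, true, 1<<30)
	fmt.Println(n, err) // 1048576 <nil>: allocate exactly what the frame declares
}
```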
+ // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. @@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 { return uint16(d >> 16) } -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - func (d decSymbol) baselineInt() int { return int(d >> 32) } -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - func (d *decSymbol) setNBits(nBits uint8) { const mask = 0xffffffffffffff00 *d = (*d & mask) | decSymbol(nBits) @@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) { *d = (*d & mask) | decSymbol(state)<<16 } -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - func (d *decSymbol) setExt(addBits uint8, baseline uint32) { const mask = 0xffff00ff *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) @@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) { s.dt[0] = symbol } -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. 
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} - // transform will transform the decoder table into a table usable for // decoding without having to apply the transformation while decoding. // The state will contain the base value and the number of bits to read. @@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { s.state = dt[br.getBits(tableLog)] } -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - // final returns the current state symbol without decoding the next. func (s decSymbol) final() (int, uint8) { return s.baselineInt(), s.addBits() } - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := br.get16BitsFast(s.state.nbBits()) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 0000000..d04a829 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 0000000..bcde398 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 0000000..332e51f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index 5442061..ab26326 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.clearCount = maxCount != 0 } -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { @@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.state = c.stateTable[lu] } -// encode the output symbol provided and write it to the bitstream. 
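The generic buildDtable above spreads each symbol's occurrences through the table with a fixed step that is co-prime with the table size, and position must return to 0 exactly when every cell has been visited once; otherwise the normalized counter was corrupt. A tiny self-contained illustration of the spread with a toy distribution (the low-probability handling is omitted here):

```go
package main

import "fmt"

func main() {
	const tableSize = 16
	tableMask := uint32(tableSize - 1)
	// Same step formula the decoder uses: (size>>1) + (size>>3) + 3.
	step := uint32(tableSize>>1 + tableSize>>3 + 3) // = 13, co-prime with 16

	norm := []int{9, 4, 3} // toy normalized counts summing to tableSize
	var dt [tableSize]byte
	position := uint32(0)
	for sym, v := range norm {
		for i := 0; i < v; i++ {
			dt[position] = byte(sym)
			position = (position + step) & tableMask
		}
	}
	// Symbols end up interleaved, and position wraps back to 0.
	fmt.Println(dt, "position:", position)
}
```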
-func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index cf33f29..5d73c21 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 { return (uint32(u) * prime4bytes) >> (32 - length) } } - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32..0916485 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,40 +10,48 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} - if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.offsets.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - h.decoders = sequenceDecs{} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) + h.huffTree = nil } } - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) } func (h *history) setDict(dict *dict) { @@ -54,6 +62,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +92,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... 
+func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md index 69aa3bb..777290d 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -2,12 +2,7 @@ VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. ## Benchmarks Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go index 2c112a0..fc40c82 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -18,19 +18,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -52,10 +44,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index cea1785..ddb63aa 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -1,3 +1,4 @@ +//go:build !appengine && gc && !purego && !noasm // +build !appengine // +build gc // +build !purego @@ -5,212 +6,205 @@ #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. 
- MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 4d64a17..17901e0 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -1,13 +1,17 @@ -// +build gc,!purego,!noasm +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm #include "textflag.h" -// Register allocation. +// Registers: #define digest R1 -#define h R2 // Return value. -#define p R3 // Input pointer. -#define len R4 -#define nblocks R5 // len / 32. +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 #define prime1 R7 #define prime2 R8 #define prime3 R9 @@ -25,60 +29,52 @@ #define round(acc, x) \ MADD prime2, acc, x, acc \ ROR $64-31, acc \ - MUL prime1, acc \ + MUL prime1, acc -// x = round(0, x). +// round0 performs the operation x = round(0, x). #define round0(x) \ MUL prime2, x \ ROR $64-31, x \ - MUL prime1, x \ - -#define mergeRound(x) \ - round0(x) \ - EOR x, h \ - MADD h, prime4, prime1, h \ - -// Update v[1-4] with 32-byte blocks. Assumes len >= 32. -#define blocksLoop() \ - LSR $5, len, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 32(p), (x1, x2) \ - round(v1, x1) \ - LDP -16(p), (x3, x4) \ - round(v2, x2) \ - SUB $1, nblocks \ - round(v3, x3) \ - round(v4, x4) \ - CBNZ nblocks, loop \ - -// The primes are repeated here to ensure that they're stored -// in a contiguous array, so we can load them with LDP. -DATA primes<> +0(SB)/8, $11400714785074694791 -DATA primes<> +8(SB)/8, $14029467366897019727 -DATA primes<>+16(SB)/8, $1609587929392839161 -DATA primes<>+24(SB)/8, $9650029242287828579 -DATA primes<>+32(SB)/8, $2870177450012600261 -GLOBL primes<>(SB), NOPTR+RODATA, $40 + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
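The `blockLoop` macro defined next unrolls the same 32-byte accumulator loop that the pure-Go path uses. As a minimal Go sketch of that loop shape, keeping the XXH64 primes in a `var` array (as the diff itself does) so expressions like `-primes[0]` wrap around instead of failing constant-overflow checks:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// The primes live in a var rather than consts so that primes[0]+primes[1]
// and -primes[0] are evaluated with uint64 wraparound.
var primes = [...]uint64{11400714785074694791, 14029467366897019727}

// round is the XXH64 accumulator step that blockLoop unrolls four times.
func round(acc, input uint64) uint64 {
	acc += input * primes[1]
	acc = bits.RotateLeft64(acc, 31)
	return acc * primes[0]
}

func main() {
	b := make([]byte, 64) // two 32-byte blocks
	v1 := primes[0] + primes[1]
	v2 := primes[1]
	v3 := uint64(0)
	v4 := -primes[0]
	for len(b) >= 32 {
		v1 = round(v1, binary.LittleEndian.Uint64(b[0:8]))
		v2 = round(v2, binary.LittleEndian.Uint64(b[8:16]))
		v3 = round(v3, binary.LittleEndian.Uint64(b[16:24]))
		v4 = round(v4, binary.LittleEndian.Uint64(b[24:32]))
		b = b[32:]
	}
	fmt.Println(v1, v2, v3, v4)
}
```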
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 - LDP b_base+0(FP), (p, len) +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) - LDP primes<> +0(SB), (prime1, prime2) - LDP primes<>+16(SB), (prime3, prime4) - MOVD primes<>+32(SB), prime5 + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 - CMP $32, len - CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } - BLO afterLoop + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop ADD prime1, prime2, v1 MOVD prime2, v2 MOVD $0, v3 NEG prime1, v4 - blocksLoop() + blockLoop() ROR $64-1, v1, x1 ROR $64-7, v2, x2 @@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 ADD x3, x4 ADD x2, x4, h - mergeRound(v1) - mergeRound(v2) - mergeRound(v3) - mergeRound(v4) + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) afterLoop: - ADD len, h + ADD n, h - TBZ $4, len, try8 + TBZ $4, n, try8 LDP.P 16(p), (x1, x2) round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. ROR $64-27, h EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h round0(x2) ROR $64-27, h - EOR x2 @> 64-27, h + EOR x2 @> 64-27, h, h MADD h, prime4, prime1, h try8: - TBZ $3, len, try4 + TBZ $3, n, try4 MOVD.P 8(p), x1 round0(x1) ROR $64-27, h - EOR x1 @> 64-27, h + EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h try4: - TBZ $2, len, try2 + TBZ $2, n, try2 MOVWU.P 4(p), x2 MUL prime1, x2 ROR $64-23, h - EOR x2 @> 64-23, h + EOR x2 @> 64-23, h, h MADD h, prime3, prime2, h try2: - TBZ $1, len, try1 + TBZ $1, n, try1 MOVHU.P 2(p), x3 AND $255, x3, x1 LSR $8, x3, x2 MUL prime5, x1 ROR $64-11, h - EOR x1 @> 64-11, h + EOR x1 @> 64-11, h, h MUL prime1, h MUL prime5, x2 ROR $64-11, h - EOR x2 @> 64-11, h + EOR x2 @> 64-11, h, h MUL prime1, h try1: - TBZ $0, len, end + TBZ $0, n, finalize MOVBU (p), x4 MUL prime5, x4 ROR $64-11, h - EOR x4 @> 64-11, h + EOR x4 @> 64-11, h, h MUL prime1, h -end: +finalize: EOR h >> 33, h MUL prime2, h EOR h >> 29, h @@ -163,24 +163,22 @@ end: RET // func writeBlocks(d *Digest, b []byte) int -// -// Assumes len(b) >= 32. -TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 - LDP primes<>(SB), (prime1, prime2) +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. MOVD d+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) - LDP b_base+8(FP), (p, len) + LDP b_base+8(FP), (p, n) - blocksLoop() + blockLoop() // Store updated state. 
STP (v1, v2), 0(digest) STP (v3, v4), 16(digest) - BIC $31, len - MOVD len, ret+32(FP) + BIC $31, n + MOVD n, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go index 1a1fac9..d4221ed 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -13,4 +13,4 @@ package xxhash func Sum64(b []byte) uint64 //go:noescape -func writeBlocks(d *Digest, b []byte) int +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 209cb4a..0be16ce 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index bc731e4..f833d15 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,19 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. 
-func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -93,12 +99,142 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] return nil } +func (s *sequenceDecs) freeDecoders() { + if f := s.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.litLengths.fse = nil + } + if f := s.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.offsets.fse = nil + } + if f := s.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.matchLengths.fse = nil + } +} + +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. 
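The byte-at-a-time loop that follows is deliberate: for an overlapping match, a bulk `copy` behaves like `memmove` and would preserve the original source bytes, whereas LZ-style expansion needs each newly written byte to be readable as input for the next. A small self-contained sketch of the behavior being implemented (hypothetical `expandMatch` helper):

```go
package main

import "fmt"

// expandMatch appends ml bytes copied from offset mo back in buf,
// byte by byte, so an overlapping match replicates what it just wrote.
func expandMatch(buf []byte, mo, ml int) []byte {
	start := len(buf) - mo
	for i := 0; i < ml; i++ {
		buf = append(buf, buf[start+i])
	}
	return buf
}

func main() {
	out := []byte("ab")
	out = expandMatch(out, 2, 6) // offset 2, length 6: overlaps itself
	fmt.Println(string(out))     // prints "abababab"
}
```

For offset 2 and length 6 the two seed bytes replicate into `abababab`, which is exactly the run-extension behavior the decoder relies on.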
+ for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +287,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,51 +312,52 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + if size-startSize == 424242 { + panic("here") + } + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } - if size > cap(s.out) { + if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. // over-allocating here can create a large amount of GC pressure so we try to keep // it as contained as possible - used := len(s.out) - startSize + used := len(out) - startSize addBytes := 256 + ll + ml + used>>2 // Clamp to max block size. if used+addBytes > maxBlockSize { addBytes = maxBlockSize - used } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. 
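When a match offset reaches past the current block plus history, the remainder indexes into the dictionary counted back from its end; the `dictO` computation just below implements that. A hedged sketch of the mapping (the names `pos`, `histLen`, and `dictLen` are illustrative):

```go
package main

import "fmt"

// dictOffset maps a match offset mo that exceeds the available output
// (pos bytes decoded in the current block plus histLen bytes of history)
// to an index into the dictionary, counted from the dictionary's start.
func dictOffset(mo, pos, histLen, dictLen int) (int, bool) {
	over := mo - (pos + histLen) // how far past history the offset reaches
	dictO := dictLen - over
	if dictO < 0 || dictO >= dictLen {
		return 0, false // corrupt input: offset beyond history+dict
	}
	return dictO, true
}

func main() {
	// 10 bytes of dictionary, 4 bytes decoded so far, no history:
	// an offset of 7 starts 3 bytes before the dictionary's end.
	fmt.Println(dictOffset(7, 4, 0, 10)) // prints "7 true"
}
```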
- dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -231,26 +368,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. - start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -264,7 +400,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -279,6 +414,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { ofState = ofTable[ofState.newState()&maxTableMask] } else { bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) llState = llTable[(llState.newState()+lowBits)&maxTableMask] @@ -291,19 +427,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } - // Add final literals - s.out = append(s.out, s.literals...) - return nil -} + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) update(br *bitReader) { - // Max 8 bits - s.litLengths.state.next(br) - // Max 9 bits - s.matchLengths.state.next(br) - // Max 8 bits - s.offsets.state.next(br) + // Add final literals + s.out = append(out, s.literals...) + return br.close() } var bitMask [16]uint16 @@ -314,87 +445,6 @@ func init() { } } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) updateAlt(br *bitReader) { - // Update all 3 states at once. Approx 20% faster. 
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. ll, llB := llState.final() @@ -457,36 +507,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. 
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 0000000..191384a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,379 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. 
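For context on that FIXME: the `_safe_` variants never store past the requested length, while the fast variants may copy in fixed-size chunks that spill into spare capacity. A rough sketch of the trade-off under stated assumptions (these helpers are illustrative only, not the assembly's actual copy routines):

```go
package main

import "fmt"

// copySafe copies exactly n bytes and never touches dst beyond n.
func copySafe(dst, src []byte, n int) {
	copy(dst[:n], src[:n])
}

// copyOver copies in fixed 16-byte chunks, possibly writing past n
// (but within cap), which is cheaper when slack space is guaranteed.
func copyOver(dst, src []byte, n int) {
	for i := 0; i < n; i += 16 {
		copy(dst[i:i+16], src[i:i+16])
	}
}

func main() {
	src := make([]byte, 32)
	dst := make([]byte, 32) // over-allocated so chunked copies stay in bounds
	copySafe(dst, src, 20)  // writes exactly 20 bytes
	copyOver(dst, src, 20)  // writes 32 bytes: 20 wanted plus 12 slack
	fmt.Println(len(dst))
}
```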
+	const useSafe = true
+	/*
+		useSafe := false
+		if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+			useSafe = true
+		}
+		if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+			useSafe = true
+		}
+		if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+			useSafe = true
+		}
+	*/
+
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeSyncAsmContext{
+		llTable:     s.litLengths.fse.dt[:maxTablesize],
+		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:     s.offsets.fse.dt[:maxTablesize],
+		llState:     uint64(s.litLengths.state.state),
+		mlState:     uint64(s.matchLengths.state.state),
+		ofState:     uint64(s.offsets.state.state),
+		iteration:   s.nSeqs - 1,
+		litRemain:   len(s.literals),
+		out:         s.out,
+		outPosition: len(s.out),
+		literals:    s.literals,
+		windowSize:  s.windowSize,
+		history:     hist,
+	}
+
+	s.seqSize = 0
+	startSize := len(s.out)
+
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+		}
+	} else {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+		}
+	}
+	switch errCode {
+	case noError:
+		break
+
+	case errorMatchLenOfsMismatch:
+		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+	case errorMatchLenTooBig:
+		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+	case errorMatchOffTooBig:
+		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+			ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+	case errorNotEnoughLiterals:
+		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+			ctx.ll, ctx.litRemain+ctx.ll)
+
+	case errorNotEnoughSpace:
+		size := ctx.outPosition + ctx.ll + ctx.ml
+		if debugDecoder {
+			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+		}
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+	default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+		return true, err
+	}
+
+	s.literals = s.literals[ctx.litPosition:]
+	t := ctx.outPosition
+	s.out = s.out[:t]
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
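This `decode` is the first half of the split that replaces the old single-pass loop: sequences are first decoded into `seqVals` (with length and offset validation), and only then expanded against literals and history by `execute`/`executeSimple`. A minimal sketch of that two-phase shape, with simplified types:

```go
package main

import "fmt"

type seqVal struct{ ll, ml, mo int }

// decodePhase stands in for sequenceDecs.decode: it only produces
// (ll, ml, mo) triples and validates them, writing no output yet.
func decodePhase() []seqVal {
	return []seqVal{{ll: 2, ml: 4, mo: 2}}
}

// executePhase stands in for execute/executeSimple: literals first,
// then a back-reference copy, done byte-wise so overlap replicates.
func executePhase(seqs []seqVal, lits, out []byte) []byte {
	for _, s := range seqs {
		out = append(out, lits[:s.ll]...)
		lits = lits[s.ll:]
		start := len(out) - s.mo
		for i := 0; i < s.ml; i++ {
			out = append(out, out[start+i])
		}
	}
	return append(out, lits...) // final literals
}

func main() {
	seqs := decodePhase()
	fmt.Println(string(executePhase(seqs, []byte("hi!"), nil))) // "hihihi!"
}
```

Splitting the phases keeps each loop simple enough to be replaced wholesale by the assembly variants declared above.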
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeAsmContext{
+		llTable:   s.litLengths.fse.dt[:maxTablesize],
+		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:   s.offsets.fse.dt[:maxTablesize],
+		llState:   uint64(s.litLengths.state.state),
+		mlState:   uint64(s.matchLengths.state.state),
+		ofState:   uint64(s.offsets.state.state),
+		seqs:      seqs,
+		iteration: len(seqs) - 1,
+		litRemain: len(s.literals),
+	}
+
+	s.seqSize = 0
+	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+		}
+	} else {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+		}
+	}
+	if errCode != 0 {
+		i := len(seqs) - ctx.iteration - 1
+		switch errCode {
+		case errorMatchLenOfsMismatch:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+		case errorMatchLenTooBig:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+		case errorNotEnoughLiterals:
+			ll := ctx.seqs[i].ll
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		}
+
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
+	}
+
+	if ctx.litRemain < 0 {
+		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+			len(s.literals), len(s.literals)-ctx.litRemain)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+	seqs        []seqVals
+	seqIndex    int
+	out         []byte
+	history     []byte
+	literals    []byte
+	outPosition int
+	litPosition int
+	windowSize  int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memcopies
+//
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+		s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 0000000..b94993a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4079 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
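The fill step here mirrors the Go bit reader's refill: zstd bitstreams are consumed from the end of the buffer, so whole used-up bytes are reloaded by re-reading eight bytes that end at the current position (`SHRQ $0x03` computes the consumed byte count, `ANDQ $0x07` keeps the bit remainder). A hedged Go sketch of the idea, with simplified field names:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// bitBuf models the refill: in is the remaining input, value holds up
// to 64 bits, and bitsRead counts bits already consumed from value.
type bitBuf struct {
	in       []byte
	value    uint64
	bitsRead uint8
}

// fill reloads whole consumed bytes by re-reading 8 bytes ending at the
// current position, matching the fast path in the assembly above.
func (b *bitBuf) fill() {
	bytes := int(b.bitsRead >> 3) // whole bytes consumed so far
	if len(b.in)-bytes < 8 {
		return // the byte-by-byte slow path would run here instead
	}
	b.in = b.in[:len(b.in)-bytes]
	b.value = binary.LittleEndian.Uint64(b.in[len(b.in)-8:])
	b.bitsRead &= 7 // keep only the sub-byte remainder
}

func main() {
	b := bitBuf{in: make([]byte, 16), bitsRead: 24}
	b.fill()
	fmt.Println(len(b.in), b.bitsRead) // prints "13 0"
}
```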
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+	CMPQ SI, $0x08
+	JL sequenceDecs_decode_56_amd64_fill_byte_by_byte
+	MOVQ BX, AX
+	SHRQ $0x03, AX
+	SUBQ AX, R14
+	MOVQ (R14), DX
+	SUBQ AX, SI
+	ANDQ $0x07, BX
+	JMP sequenceDecs_decode_56_amd64_fill_end
+
+sequenceDecs_decode_56_amd64_fill_byte_by_byte:
+	CMPQ SI, $0x00
+	JLE sequenceDecs_decode_56_amd64_fill_end
+	CMPQ BX, $0x07
+	JLE sequenceDecs_decode_56_amd64_fill_end
+	SHLQ $0x08, DX
+	SUBQ $0x01, R14
+	SUBQ $0x01, SI
+	SUBQ $0x08, BX
+	MOVBQZX (R14), AX
+	ORQ AX, DX
+	JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_56_amd64_fill_end:
+	// Update offset
+	MOVQ R9, AX
+	MOVQ BX, CX
+	MOVQ DX, R15
+	SHLQ CL, R15
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decode_56_amd64_of_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decode_56_amd64_of_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decode_56_amd64_of_update_zero
+	NEGQ CX
+	SHRQ CL, R15
+	ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_of_update_zero:
+	MOVQ AX, 16(R10)
+
+	// Update match length
+	MOVQ R8, AX
+	MOVQ BX, CX
+	MOVQ DX, R15
+	SHLQ CL, R15
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decode_56_amd64_ml_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decode_56_amd64_ml_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decode_56_amd64_ml_update_zero
+	NEGQ CX
+	SHRQ CL, R15
+	ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ml_update_zero:
+	MOVQ AX, 8(R10)
+
+	// Update literal length
+	MOVQ DI, AX
+	MOVQ BX, CX
+	MOVQ DX, R15
+	SHLQ CL, R15
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decode_56_amd64_ll_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decode_56_amd64_ll_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decode_56_amd64_ll_update_zero
+	NEGQ CX
+	SHRQ CL, R15
+	ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ll_update_zero:
+	MOVQ AX, (R10)
+
+	// Fill bitreader for state updates
+	MOVQ R14, (SP)
+	MOVQ R9, AX
+	SHRQ $0x08, AX
+	MOVBQZX AL, AX
+	MOVQ ctx+16(FP), CX
+	CMPQ 96(CX), $0x00
+	JZ sequenceDecs_decode_56_amd64_skip_update
+
+	// Update Literal Length State
+	MOVBQZX DI, R14
+	SHRQ $0x10, DI
+	MOVWQZX DI, DI
+	LEAQ (BX)(R14*1), CX
+	MOVQ DX, R15
+	MOVQ CX, BX
+	ROLQ CL, R15
+	MOVL $0x00000001, BP
+	MOVB R14, CL
+	SHLL CL, BP
+	DECL BP
+	ANDQ BP, R15
+	ADDQ R15, DI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Match Length State
+	MOVBQZX R8, R14
+	SHRQ $0x10, R8
+	MOVWQZX R8, R8
+	LEAQ (BX)(R14*1), CX
+	MOVQ DX, R15
+	MOVQ CX, BX
+	ROLQ CL, R15
+	MOVL $0x00000001, BP
+	MOVB R14, CL
+	SHLL CL, BP
+	DECL BP
+	ANDQ BP, R15
+	ADDQ R15, R8
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Offset State
+	MOVBQZX R9, R14
+	SHRQ $0x10, R9
+	MOVWQZX R9, R9
+	LEAQ (BX)(R14*1), CX
+	MOVQ DX, R15
+	MOVQ CX, BX
+	ROLQ CL, R15
+	MOVL $0x00000001, BP
+	MOVB R14, CL
+	SHLL CL, BP
+	DECL BP
+	ANDQ BP, R15
+	ADDQ R15, R9
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_56_amd64_skip_update:
+	// Adjust offset
+	MOVQ 16(R10), CX
+	CMPQ AX, $0x01
+	JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0
+	MOVQ R12, R13
+	MOVQ R11, R12
+	MOVQ CX, R11
+	JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0:
+	CMPQ (R10), $0x00000000
+	JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero
+	INCQ CX
+	JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero:
+	TESTQ CX, CX
+	JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+	MOVQ R11, CX
+	JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offset_nonzero:
+	CMPQ CX, $0x01
+	JB sequenceDecs_decode_56_amd64_adjust_zero
+	JEQ sequenceDecs_decode_56_amd64_adjust_one
+	CMPQ CX, $0x02
+	JA sequenceDecs_decode_56_amd64_adjust_three
+	JMP sequenceDecs_decode_56_amd64_adjust_two
+
+sequenceDecs_decode_56_amd64_adjust_zero:
+	MOVQ R11, AX
+	JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_one:
+	MOVQ R12, AX
+	JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_two:
+	MOVQ R13, AX
+	JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_three:
+	LEAQ -1(R11), AX
+
+sequenceDecs_decode_56_amd64_adjust_test_temp_valid:
+	TESTQ AX, AX
+	JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid
+	MOVQ $0x00000001, AX
+
+sequenceDecs_decode_56_amd64_adjust_temp_valid:
+	CMPQ CX, $0x01
+	CMOVQNE R12, R13
+	MOVQ R11, R12
+	MOVQ AX, R11
+	MOVQ AX, CX
+
+sequenceDecs_decode_56_amd64_after_adjust:
+	MOVQ CX, 16(R10)
+
+	// Check values
+	MOVQ 8(R10), AX
+	MOVQ (R10), R14
+	LEAQ (AX)(R14*1), R15
+	MOVQ s+0(FP), BP
+	ADDQ R15, 256(BP)
+	MOVQ ctx+16(FP), R15
+	SUBQ R14, 128(R15)
+	JS error_not_enough_literals
+	CMPQ AX, $0x00020002
+	JA sequenceDecs_decode_56_amd64_error_match_len_too_big
+	TESTQ CX, CX
+	JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok
+	TESTQ AX, AX
+	JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_amd64_match_len_ofs_ok:
+	ADDQ $0x18, R10
+	MOVQ ctx+16(FP), AX
+	DECQ 96(AX)
+	JNS sequenceDecs_decode_56_amd64_main_loop
+	MOVQ s+0(FP), AX
+	MOVQ R11, 144(AX)
+	MOVQ R12, 152(AX)
+	MOVQ R13, 160(AX)
+	MOVQ br+8(FP), AX
+	MOVQ DX, 32(AX)
+	MOVB BL, 40(AX)
+	MOVQ SI, 24(AX)
+
+	// Return success
+	MOVQ $0x00000000, ret+24(FP)
+	RET
+
+	// Return with match length error
+sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch:
+	MOVQ $0x00000001, ret+24(FP)
+	RET
+
+	// Return with match too long error
+sequenceDecs_decode_56_amd64_error_match_len_too_big:
+	MOVQ $0x00000002, ret+24(FP)
+	RET
+
+	// Return with match offset too long error
+	MOVQ $0x00000003, ret+24(FP)
+	RET
+
+	// Return with not enough literals error
+error_not_enough_literals:
+	MOVQ $0x00000004, ret+24(FP)
+	RET
+
+// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
+	MOVQ br+8(FP), CX
+	MOVQ 32(CX), AX
+	MOVBQZX 40(CX), DX
+	MOVQ 24(CX), BX
+	MOVQ (CX), CX
+	ADDQ BX, CX
+	MOVQ CX, (SP)
+	MOVQ ctx+16(FP), CX
+	MOVQ 72(CX), SI
+	MOVQ 80(CX), DI
+	MOVQ 88(CX), R8
+	MOVQ 104(CX), R9
+	MOVQ s+0(FP), CX
+	MOVQ 144(CX), R10
+	MOVQ 152(CX), R11
+	MOVQ 160(CX), R12
+
+sequenceDecs_decode_bmi2_main_loop:
+	MOVQ (SP), R13
+
+	// Fill bitreader to have enough for the offset and match length.
+	CMPQ BX, $0x08
+	JL sequenceDecs_decode_bmi2_fill_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R13
+	MOVQ (R13), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP sequenceDecs_decode_bmi2_fill_end
+
+sequenceDecs_decode_bmi2_fill_byte_by_byte:
+	CMPQ BX, $0x00
+	JLE sequenceDecs_decode_bmi2_fill_end
+	CMPQ DX, $0x07
+	JLE sequenceDecs_decode_bmi2_fill_end
+	SHLQ $0x08, AX
+	SUBQ $0x01, R13
+	SUBQ $0x01, BX
+	SUBQ $0x08, DX
+	MOVBQZX (R13), CX
+	ORQ CX, AX
+	JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_end:
+	// Update offset
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, R8, R14
+	MOVQ AX, R15
+	LEAQ (DX)(R14*1), CX
+	ROLQ CL, R15
+	BZHIQ R14, R15, R15
+	MOVQ CX, DX
+	MOVQ R8, CX
+	SHRQ $0x20, CX
+	ADDQ R15, CX
+	MOVQ CX, 16(R9)
+
+	// Update match length
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, DI, R14
+	MOVQ AX, R15
+	LEAQ (DX)(R14*1), CX
+	ROLQ CL, R15
+	BZHIQ R14, R15, R15
+	MOVQ CX, DX
+	MOVQ DI, CX
+	SHRQ $0x20, CX
+	ADDQ R15, CX
+	MOVQ CX, 8(R9)
+
+	// Fill bitreader to have enough for the remaining
+	CMPQ BX, $0x08
+	JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R13
+	MOVQ (R13), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP sequenceDecs_decode_bmi2_fill_2_end
+
+sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
+	CMPQ BX, $0x00
+	JLE sequenceDecs_decode_bmi2_fill_2_end
+	CMPQ DX, $0x07
+	JLE sequenceDecs_decode_bmi2_fill_2_end
+	SHLQ $0x08, AX
+	SUBQ $0x01, R13
+	SUBQ $0x01, BX
+	SUBQ $0x08, DX
+	MOVBQZX (R13), CX
+	ORQ CX, AX
+	JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_2_end:
+	// Update literal length
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, SI, R14
+	MOVQ AX, R15
+	LEAQ (DX)(R14*1), CX
+	ROLQ CL, R15
+	BZHIQ R14, R15, R15
+	MOVQ CX, DX
+	MOVQ SI, CX
+	SHRQ $0x20, CX
+	ADDQ R15, CX
+	MOVQ CX, (R9)
+
+	// Fill bitreader for state updates
+	MOVQ R13, (SP)
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, R8, R13
+	MOVQ ctx+16(FP), CX
+	CMPQ 96(CX), $0x00
+	JZ sequenceDecs_decode_bmi2_skip_update
+	LEAQ (SI)(DI*1), R14
+	ADDQ R8, R14
+	MOVBQZX R14, R14
+	LEAQ (DX)(R14*1), CX
+	MOVQ AX, R15
+	MOVQ CX, DX
+	ROLQ CL, R15
+	BZHIQ R14, R15, R15
+
+	// Update Offset State
+	BZHIQ R8, R15, CX
+	SHRXQ R8, R15, R15
+	MOVQ $0x00001010, R14
+	BEXTRQ R14, R8, R8
+	ADDQ CX, R8
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Match Length State
+	BZHIQ DI, R15, CX
+	SHRXQ DI, R15, R15
+	MOVQ $0x00001010, R14
+	BEXTRQ R14, DI, DI
+	ADDQ CX, DI
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Literal Length State
+	BZHIQ SI, R15, CX
+	MOVQ $0x00001010, R14
+	BEXTRQ R14, SI, SI
+	ADDQ CX, SI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_bmi2_skip_update:
+	// Adjust offset
+	MOVQ 16(R9), CX
+	CMPQ R13, $0x01
+	JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0
+	MOVQ R11, R12
+	MOVQ R10, R11
+	MOVQ CX, R10
+	JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0:
+	CMPQ (R9), $0x00000000
+	JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero
+	INCQ CX
+	JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_bmi2_adjust_offset_maybezero:
+	TESTQ CX, CX
+	JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero
+	MOVQ R10, CX
+	JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offset_nonzero:
+	CMPQ CX, $0x01
+	JB sequenceDecs_decode_bmi2_adjust_zero
+	JEQ sequenceDecs_decode_bmi2_adjust_one
+	CMPQ CX, $0x02
+	JA sequenceDecs_decode_bmi2_adjust_three
+	JMP sequenceDecs_decode_bmi2_adjust_two
+
+sequenceDecs_decode_bmi2_adjust_zero:
+	MOVQ R10, R13
+	JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_one:
+	MOVQ R11, R13
+	JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_two:
+	MOVQ R12, R13
+	JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_three:
+	LEAQ -1(R10), R13
+
+sequenceDecs_decode_bmi2_adjust_test_temp_valid:
+	TESTQ R13, R13
+	JNZ sequenceDecs_decode_bmi2_adjust_temp_valid
+	MOVQ $0x00000001, R13
+
+sequenceDecs_decode_bmi2_adjust_temp_valid:
+	CMPQ CX, $0x01
+	CMOVQNE R11, R12
+	MOVQ R10, R11
+	MOVQ R13, R10
+	MOVQ R13, CX
+
+sequenceDecs_decode_bmi2_after_adjust:
+	MOVQ CX, 16(R9)
+
+	// Check values
+	MOVQ 8(R9), R13
+	MOVQ (R9), R14
+	LEAQ (R13)(R14*1), R15
+	MOVQ s+0(FP), BP
+	ADDQ R15, 256(BP)
+	MOVQ ctx+16(FP), R15
+	SUBQ R14, 128(R15)
+	JS error_not_enough_literals
+	CMPQ R13, $0x00020002
+	JA sequenceDecs_decode_bmi2_error_match_len_too_big
+	TESTQ CX, CX
+	JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok
+	TESTQ R13, R13
+	JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_bmi2_match_len_ofs_ok:
+	ADDQ $0x18, R9
+	MOVQ ctx+16(FP), CX
+	DECQ 96(CX)
+	JNS sequenceDecs_decode_bmi2_main_loop
+	MOVQ s+0(FP), CX
+	MOVQ R10, 144(CX)
+	MOVQ R11, 152(CX)
+	MOVQ R12, 160(CX)
+	MOVQ br+8(FP), CX
+	MOVQ AX, 32(CX)
+	MOVB DL, 40(CX)
+	MOVQ BX, 24(CX)
+
+	// Return success
+	MOVQ $0x00000000, ret+24(FP)
+	RET
+
+	// Return with match length error
+sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch:
+	MOVQ $0x00000001, ret+24(FP)
+	RET
+
+	// Return with match too long error
+sequenceDecs_decode_bmi2_error_match_len_too_big:
+	MOVQ $0x00000002, ret+24(FP)
+	RET
+
+	// Return with match offset too long error
+	MOVQ $0x00000003, ret+24(FP)
+	RET
+
+	// Return with not enough literals error
+error_not_enough_literals:
+	MOVQ $0x00000004, ret+24(FP)
+	RET
+
+// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
+	MOVQ br+8(FP), CX
+	MOVQ 32(CX), AX
+	MOVBQZX 40(CX), DX
+	MOVQ 24(CX), BX
+	MOVQ (CX), CX
+	ADDQ BX, CX
+	MOVQ CX, (SP)
+	MOVQ ctx+16(FP), CX
+	MOVQ 72(CX), SI
+	MOVQ 80(CX), DI
+	MOVQ 88(CX), R8
+	MOVQ 104(CX), R9
+	MOVQ s+0(FP), CX
+	MOVQ 144(CX), R10
+	MOVQ 152(CX), R11
+	MOVQ 160(CX), R12
+
+sequenceDecs_decode_56_bmi2_main_loop:
+	MOVQ (SP), R13
+
+	// Fill bitreader to have enough for the offset and match length.
+	CMPQ BX, $0x08
+	JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R13
+	MOVQ (R13), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP sequenceDecs_decode_56_bmi2_fill_end
+
+sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
+	CMPQ BX, $0x00
+	JLE sequenceDecs_decode_56_bmi2_fill_end
+	CMPQ DX, $0x07
+	JLE sequenceDecs_decode_56_bmi2_fill_end
+	SHLQ $0x08, AX
+	SUBQ $0x01, R13
+	SUBQ $0x01, BX
+	SUBQ $0x08, DX
+	MOVBQZX (R13), CX
+	ORQ CX, AX
+	JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_56_bmi2_fill_end:
+	// Update offset
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, R8, R14
+	MOVQ AX, R15
+	LEAQ (DX)(R14*1), CX
+	ROLQ CL, R15
+	BZHIQ R14, R15, R15
+	MOVQ CX, DX
+	MOVQ R8, CX
+	SHRQ $0x20, CX
+	ADDQ R15, CX
+	MOVQ CX, 16(R9)
+
+	// Update match length
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, DI, R14
+	MOVQ AX, R15
+	LEAQ (DX)(R14*1), CX
+	ROLQ CL, R15
+	BZHIQ R14, R15, R15
+	MOVQ CX, DX
+	MOVQ DI, CX
+	SHRQ $0x20, CX
+	ADDQ R15, CX
+	MOVQ CX, 8(R9)
+
+	// Update literal length
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, SI, R14
+	MOVQ AX, R15
+	LEAQ (DX)(R14*1), CX
+	ROLQ CL, R15
+	BZHIQ R14, R15, R15
+	MOVQ CX, DX
+	MOVQ SI, CX
+	SHRQ $0x20, CX
+	ADDQ R15, CX
+	MOVQ CX, (R9)
+
+	// Fill bitreader for state updates
+	MOVQ R13, (SP)
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, R8, R13
+	MOVQ ctx+16(FP), CX
+	CMPQ 96(CX), $0x00
+	JZ sequenceDecs_decode_56_bmi2_skip_update
+	LEAQ (SI)(DI*1), R14
+	ADDQ R8, R14
+	MOVBQZX R14, R14
+	LEAQ (DX)(R14*1), CX
+	MOVQ AX, R15
+	MOVQ CX, DX
+	ROLQ CL, R15
+	BZHIQ R14, R15, R15
+
+	// Update Offset State
+	BZHIQ R8, R15, CX
+	SHRXQ R8, R15, R15
+	MOVQ $0x00001010, R14
+	BEXTRQ R14, R8, R8
+	ADDQ CX, R8
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Match Length State
+	BZHIQ DI, R15, CX
+	SHRXQ DI, R15, R15
+	MOVQ $0x00001010, R14
+	BEXTRQ R14, DI, DI
+	ADDQ CX, DI
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Literal Length State
+	BZHIQ SI, R15, CX
+	MOVQ $0x00001010, R14
+	BEXTRQ R14, SI, SI
+	ADDQ CX, SI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_56_bmi2_skip_update:
+	// Adjust offset
+	MOVQ 16(R9), CX
+	CMPQ R13, $0x01
+	JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0
+	MOVQ R11, R12
+	MOVQ R10, R11
+	MOVQ CX, R10
+	JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0:
+	CMPQ (R9), $0x00000000
+	JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero
+	INCQ CX
+	JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_56_bmi2_adjust_offset_maybezero:
+	TESTQ CX, CX
+	JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+	MOVQ R10, CX
+	JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offset_nonzero:
+	CMPQ CX, $0x01
+	JB sequenceDecs_decode_56_bmi2_adjust_zero
+	JEQ sequenceDecs_decode_56_bmi2_adjust_one
+	CMPQ CX, $0x02
+	JA sequenceDecs_decode_56_bmi2_adjust_three
+	JMP sequenceDecs_decode_56_bmi2_adjust_two
+
+sequenceDecs_decode_56_bmi2_adjust_zero:
+	MOVQ R10, R13
+	JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_one:
+	MOVQ R11, R13
+	JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_two:
+	MOVQ R12, R13
+	JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_three:
+	LEAQ -1(R10), R13
+
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid:
+	TESTQ R13, R13
+	JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid
+	MOVQ $0x00000001, R13
+
+sequenceDecs_decode_56_bmi2_adjust_temp_valid:
+	CMPQ CX, $0x01
+	CMOVQNE R11, R12
+	MOVQ R10, R11
+	MOVQ R13, R10
+	MOVQ R13, CX
+
+sequenceDecs_decode_56_bmi2_after_adjust:
+	MOVQ CX, 16(R9)
+
+	// Check values
+	MOVQ 8(R9), R13
+	MOVQ (R9), R14
+	LEAQ (R13)(R14*1), R15
+	MOVQ s+0(FP), BP
+	ADDQ R15, 256(BP)
+	MOVQ ctx+16(FP), R15
+	SUBQ R14, 128(R15)
+	JS error_not_enough_literals
+	CMPQ R13, $0x00020002
+	JA sequenceDecs_decode_56_bmi2_error_match_len_too_big
+	TESTQ CX, CX
+	JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok
+	TESTQ R13, R13
+	JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
+	ADDQ $0x18, R9
+	MOVQ ctx+16(FP), CX
+	DECQ 96(CX)
+	JNS sequenceDecs_decode_56_bmi2_main_loop
+	MOVQ s+0(FP), CX
+	MOVQ R10, 144(CX)
+	MOVQ R11, 152(CX)
+	MOVQ R12, 160(CX)
+	MOVQ br+8(FP), CX
+	MOVQ AX, 32(CX)
+	MOVB DL, 40(CX)
+	MOVQ BX, 24(CX)
+
+	// Return success
+	MOVQ $0x00000000, ret+24(FP)
+	RET
+
+	// Return with match length error
+sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch:
+	MOVQ $0x00000001, ret+24(FP)
+	RET
+
+	// Return with match too long error
+sequenceDecs_decode_56_bmi2_error_match_len_too_big:
+	MOVQ $0x00000002, ret+24(FP)
+	RET
+
+	// Return with match offset too long error
+	MOVQ $0x00000003, ret+24(FP)
+	RET
+
+	// Return with not enough literals error
+error_not_enough_literals:
+	MOVQ $0x00000004, ret+24(FP)
+	RET
+
+// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
+	MOVQ ctx+0(FP), R10
+	MOVQ 8(R10), CX
+	TESTQ CX, CX
+	JZ empty_seqs
+	MOVQ (R10), AX
+	MOVQ 24(R10), DX
+	MOVQ 32(R10), BX
+	MOVQ 80(R10), SI
+	MOVQ 104(R10), DI
+	MOVQ 120(R10), R8
+	MOVQ 56(R10), R9
+	MOVQ 64(R10), R10
+	ADDQ R10, R9
+
+	// seqsBase += 24 * seqIndex
+	LEAQ (DX)(DX*2), R11
+	SHLQ $0x03, R11
+	ADDQ R11, AX
+
+	// outBase += outPosition
+	ADDQ DI, BX
+
+main_loop:
+	MOVQ (AX), R11
+	MOVQ 16(AX), R12
+	MOVQ 8(AX), R13
+
+	// Copy literals
+	TESTQ R11, R11
+	JZ check_offset
+	XORQ R14, R14
+
+copy_1:
+	MOVUPS (SI)(R14*1), X0
+	MOVUPS X0, (BX)(R14*1)
+	ADDQ $0x10, R14
+	CMPQ R14, R11
+	JB copy_1
+	ADDQ R11, SI
+	ADDQ R11, BX
+	ADDQ R11, DI
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+	LEAQ (DI)(R10*1), R11
+	CMPQ R12, R11
+	JG error_match_off_too_big
+	CMPQ R12, R8
+	JG error_match_off_too_big
+
+	// Copy match from history
+	MOVQ R12, R11
+	SUBQ DI, R11
+	JLS copy_match
+	MOVQ R9, R14
+	SUBQ R11, R14
+	CMPQ R13, R11
+	JG copy_all_from_history
+	MOVQ R13, R11
+	SUBQ $0x10, R11
+	JB copy_4_small
+
+copy_4_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (BX)
+	ADDQ $0x10, R14
+	ADDQ $0x10, BX
+	SUBQ $0x10, R11
+	JAE copy_4_loop
+	LEAQ 16(R14)(R11*1), R14
+	LEAQ 16(BX)(R11*1), BX
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(BX)
+	JMP copy_4_end
+
+copy_4_small:
+	CMPQ R13, $0x03
+	JE copy_4_move_3
+	CMPQ R13, $0x08
+	JB copy_4_move_4through7
+	JMP copy_4_move_8through16
+
+copy_4_move_3:
+	MOVW (R14), R11
+	MOVB 2(R14), R12
+	MOVW R11, (BX)
+	MOVB R12, 2(BX)
+	ADDQ R13, R14
+	ADDQ R13, BX
+	JMP copy_4_end
+
+copy_4_move_4through7:
+	MOVL (R14), R11
+	MOVL -4(R14)(R13*1), R12
+	MOVL R11, (BX)
+	MOVL R12, -4(BX)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, BX
+	JMP copy_4_end
+
+copy_4_move_8through16:
+	MOVQ (R14), R11
+	MOVQ -8(R14)(R13*1), R12
+	MOVQ R11, (BX)
+	MOVQ R12, -8(BX)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, BX
+
+copy_4_end:
+	ADDQ R13, DI
+	ADDQ $0x18, AX
+	INCQ DX
+	CMPQ DX, CX
+	JB main_loop
+	JMP loop_finished
+
+copy_all_from_history:
+	MOVQ R11, R15
+	SUBQ $0x10, R15
+	JB copy_5_small
+
+copy_5_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (BX)
+	ADDQ $0x10, R14
+	ADDQ $0x10, BX
+	SUBQ $0x10, R15
+	JAE copy_5_loop
+	LEAQ 16(R14)(R15*1), R14
+	LEAQ 16(BX)(R15*1), BX
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(BX)
+	JMP copy_5_end
+
+copy_5_small:
+	CMPQ R11, $0x03
+	JE copy_5_move_3
+	JB copy_5_move_1or2
+	CMPQ R11, $0x08
+	JB copy_5_move_4through7
+	JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+	MOVB (R14), R15
+	MOVB -1(R14)(R11*1), BP
+	MOVB R15, (BX)
+	MOVB BP, -1(BX)(R11*1)
+	ADDQ R11, R14
+	ADDQ R11, BX
+	JMP copy_5_end
+
+copy_5_move_3:
+	MOVW (R14), R15
+	MOVB 2(R14), BP
+	MOVW R15, (BX)
+	MOVB BP, 2(BX)
+	ADDQ R11, R14
+	ADDQ R11, BX
+	JMP copy_5_end
+
+copy_5_move_4through7:
+	MOVL (R14), R15
+	MOVL -4(R14)(R11*1), BP
+	MOVL R15, (BX)
+	MOVL BP, -4(BX)(R11*1)
+	ADDQ R11, R14
+	ADDQ R11, BX
+	JMP copy_5_end
+
+copy_5_move_8through16:
+	MOVQ (R14), R15
+	MOVQ -8(R14)(R11*1), BP
+	MOVQ R15, (BX)
+	MOVQ BP, -8(BX)(R11*1)
+	ADDQ R11, R14
+	ADDQ R11, BX
+
+copy_5_end:
+	ADDQ R11, DI
+	SUBQ R11, R13
+
+	// Copy match from the current buffer
+copy_match:
+	MOVQ BX, R11
+	SUBQ R12, R11
+
+	// ml <= mo
+	CMPQ R13, R12
+	JA copy_overlapping_match
+
+	// Copy non-overlapping match
+	ADDQ R13, DI
+	MOVQ BX, R12
+	ADDQ R13, BX
+
+copy_2:
+	MOVUPS (R11), X0
+	MOVUPS X0, (R12)
+	ADDQ $0x10, R11
+	ADDQ $0x10, R12
+	SUBQ $0x10, R13
+	JHI copy_2
+	JMP handle_loop
+
+	// Copy overlapping match
+copy_overlapping_match:
+	ADDQ R13, DI
+
+copy_slow_3:
+	MOVB (R11), R12
+	MOVB R12, (BX)
+	INCQ R11
+	INCQ BX
+	DECQ R13
+	JNZ copy_slow_3
+
+handle_loop:
+	ADDQ $0x18, AX
+	INCQ DX
+	CMPQ DX, CX
+	JB main_loop
+
+loop_finished:
+	// Return value
+	MOVB $0x01, ret+8(FP)
+
+	// Update the context
+	MOVQ ctx+0(FP), AX
+	MOVQ DX, 24(AX)
+	MOVQ DI, 104(AX)
+	SUBQ 80(AX), SI
+	MOVQ SI, 112(AX)
+	RET
+
+error_match_off_too_big:
+	// Return value
+	MOVB $0x00, ret+8(FP)
+
+	// Update the context
+	MOVQ ctx+0(FP), AX
+	MOVQ DX, 24(AX)
+	MOVQ DI, 104(AX)
+	SUBQ 80(AX), SI
+	MOVQ SI, 112(AX)
+	RET
+
+empty_seqs:
+	// Return value
+	MOVB $0x01, ret+8(FP)
+	RET
+
+// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9
+	MOVQ ctx+0(FP), R10
+	MOVQ 8(R10), CX
+	TESTQ CX, CX
+	JZ empty_seqs
+	MOVQ (R10), AX
+	MOVQ 24(R10), DX
+	MOVQ 32(R10), BX
+	MOVQ 80(R10), SI
+	MOVQ 104(R10), DI
+	MOVQ 120(R10), R8
+	MOVQ 56(R10), R9
+	MOVQ 64(R10), R10
+	ADDQ R10, R9
+
+	// seqsBase += 24 * seqIndex
+	LEAQ (DX)(DX*2), R11
+	SHLQ $0x03, R11
+	ADDQ R11, AX
+
+	// outBase += outPosition
+	ADDQ DI, BX
+
+main_loop:
+	MOVQ (AX), R11
+	MOVQ 16(AX), R12
+	MOVQ 8(AX), R13
+
+	// Copy literals
+	TESTQ R11, R11
+	JZ check_offset
+	MOVQ R11, R14
+	SUBQ $0x10, R14
+	JB copy_1_small
+
+copy_1_loop:
+	MOVUPS (SI), X0
+	MOVUPS X0, (BX)
+	ADDQ $0x10, SI
+	ADDQ $0x10, BX
+	SUBQ $0x10, R14
+	JAE copy_1_loop
+	LEAQ 16(SI)(R14*1), SI
+	LEAQ 16(BX)(R14*1), BX
+	MOVUPS -16(SI), X0
+	MOVUPS X0, -16(BX)
+	JMP copy_1_end
+
+copy_1_small:
+	CMPQ R11, $0x03
+	JE copy_1_move_3
+	JB copy_1_move_1or2
+	CMPQ R11, $0x08
+	JB copy_1_move_4through7
+	JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+	MOVB (SI), R14
+	MOVB -1(SI)(R11*1), R15
+	MOVB R14, (BX)
+	MOVB R15, -1(BX)(R11*1)
+	ADDQ R11, SI
+	ADDQ R11, BX
+	JMP copy_1_end
+
+copy_1_move_3:
+	MOVW (SI), R14
+	MOVB 2(SI), R15
+	MOVW R14, (BX)
+	MOVB R15, 2(BX)
+	ADDQ R11, SI
+	ADDQ R11, BX
+	JMP copy_1_end
+
+copy_1_move_4through7:
+	MOVL (SI), R14
+	MOVL -4(SI)(R11*1), R15
+	MOVL R14, (BX)
+	MOVL R15, -4(BX)(R11*1)
+	ADDQ R11, SI
+	ADDQ R11, BX
+	JMP copy_1_end
+
+copy_1_move_8through16:
+	MOVQ (SI), R14
+	MOVQ -8(SI)(R11*1), R15
+	MOVQ R14, (BX)
+	MOVQ R15, -8(BX)(R11*1)
+	ADDQ R11, SI
+	ADDQ R11, BX
+
+copy_1_end:
+	ADDQ R11, DI
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+	LEAQ (DI)(R10*1), R11
+	CMPQ R12, R11
+	JG error_match_off_too_big
+	CMPQ R12, R8
+	JG error_match_off_too_big
+
+	// Copy match from history
+	MOVQ R12, R11
+	SUBQ DI, R11
+	JLS copy_match
+	MOVQ R9, R14
+	SUBQ R11, R14
+	CMPQ R13, R11
+	JG copy_all_from_history
+	MOVQ R13, R11
+	SUBQ $0x10, R11
+	JB copy_4_small
+
+copy_4_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (BX)
+	ADDQ $0x10, R14
+	ADDQ $0x10, BX
+	SUBQ $0x10, R11
+	JAE copy_4_loop
+	LEAQ 16(R14)(R11*1), R14
+	LEAQ 16(BX)(R11*1), BX
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(BX)
+	JMP copy_4_end
+
+copy_4_small:
+	CMPQ R13, $0x03
+	JE copy_4_move_3
+	CMPQ R13, $0x08
+	JB copy_4_move_4through7
+	JMP copy_4_move_8through16
+
+copy_4_move_3:
+	MOVW (R14), R11
+	MOVB 2(R14), R12
+	MOVW R11, (BX)
+	MOVB R12, 2(BX)
+	ADDQ R13, R14
+	ADDQ R13, BX
+	JMP copy_4_end
+
+copy_4_move_4through7:
+	MOVL (R14), R11
+	MOVL -4(R14)(R13*1), R12
+	MOVL R11, (BX)
+	MOVL R12, -4(BX)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, BX
+	JMP copy_4_end
+
+copy_4_move_8through16:
+	MOVQ (R14), R11
+	MOVQ -8(R14)(R13*1), R12
+	MOVQ R11, (BX)
+	MOVQ R12, -8(BX)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, BX
+
+copy_4_end:
+	ADDQ R13, DI
+	ADDQ $0x18, AX
+	INCQ DX
+	CMPQ DX, CX
+	JB main_loop
+	JMP loop_finished
+
+copy_all_from_history:
+	MOVQ R11, R15
+	SUBQ $0x10, R15
+	JB copy_5_small
+
+copy_5_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (BX)
+	ADDQ $0x10, R14
+	ADDQ $0x10, BX
+	SUBQ $0x10, R15
+	JAE copy_5_loop
+	LEAQ 16(R14)(R15*1), R14
+	LEAQ 16(BX)(R15*1), BX
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(BX)
+	JMP copy_5_end
+
+copy_5_small:
+	CMPQ R11, $0x03
+	JE copy_5_move_3
+	JB copy_5_move_1or2
+	CMPQ R11, $0x08
+	JB copy_5_move_4through7
+	JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+	MOVB (R14), R15
+	MOVB -1(R14)(R11*1), BP
+	MOVB R15, (BX)
+	MOVB BP, -1(BX)(R11*1)
+	ADDQ R11, R14
+	ADDQ R11, BX
+	JMP copy_5_end
+
+copy_5_move_3:
+	MOVW (R14), R15
+	MOVB 2(R14), BP
+	MOVW R15, (BX)
+	MOVB BP, 2(BX)
+	ADDQ R11, R14
+	ADDQ R11, BX
+	JMP copy_5_end
+
+copy_5_move_4through7:
+	MOVL (R14), R15
+	MOVL -4(R14)(R11*1), BP
+	MOVL R15, (BX)
+	MOVL BP, -4(BX)(R11*1)
+	ADDQ R11, R14
+	ADDQ R11, BX
+	JMP copy_5_end
+
+copy_5_move_8through16:
+	MOVQ (R14), R15
+	MOVQ -8(R14)(R11*1), BP
+	MOVQ R15, (BX)
+	MOVQ BP, -8(BX)(R11*1)
+	ADDQ R11, R14
+	ADDQ R11, BX
+
+copy_5_end:
+	ADDQ R11, DI
+	SUBQ R11, R13
+
+	// Copy match from the current buffer
+copy_match:
+	MOVQ BX, R11
+	SUBQ R12, R11
+
+	// ml <= mo
+	CMPQ R13, R12
+	JA copy_overlapping_match
+
+	// Copy non-overlapping match
+	ADDQ R13, DI
+	MOVQ R13, R12
+	SUBQ $0x10, R12
+	JB copy_2_small
+
+copy_2_loop:
+	MOVUPS (R11), X0
+	MOVUPS X0, (BX)
+	ADDQ $0x10, R11
+	ADDQ $0x10, BX
+	SUBQ $0x10, R12
+	JAE copy_2_loop
+	LEAQ 16(R11)(R12*1), R11
+	LEAQ 16(BX)(R12*1), BX
+	MOVUPS -16(R11), X0
+	MOVUPS X0, -16(BX)
+	JMP copy_2_end
+
+copy_2_small:
+	CMPQ R13, $0x03
+	JE copy_2_move_3
+	JB copy_2_move_1or2
+	CMPQ R13, $0x08
+	JB copy_2_move_4through7
+	JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+	MOVB (R11), R12
+	MOVB -1(R11)(R13*1), R14
+	MOVB R12, (BX)
+	MOVB R14, -1(BX)(R13*1)
+	ADDQ R13, R11
+	ADDQ R13, BX
+	JMP copy_2_end
+
+copy_2_move_3:
+	MOVW (R11), R12
+	MOVB 2(R11), R14
+	MOVW R12, (BX)
+	MOVB R14, 2(BX)
+	ADDQ R13, R11
+	ADDQ R13, BX
+	JMP copy_2_end
+
+copy_2_move_4through7:
+	MOVL (R11), R12
+	MOVL -4(R11)(R13*1), R14
+	MOVL R12, (BX)
+	MOVL R14, -4(BX)(R13*1)
+	ADDQ R13, R11
+	ADDQ R13, BX
+	JMP copy_2_end
+
+copy_2_move_8through16:
+	MOVQ (R11), R12
+	MOVQ -8(R11)(R13*1), R14
+	MOVQ R12, (BX)
+	MOVQ R14, -8(BX)(R13*1)
+	ADDQ R13, R11
+	ADDQ R13, BX
+
+copy_2_end:
+	JMP handle_loop
+
+	// Copy overlapping match
+copy_overlapping_match:
+	ADDQ R13, DI
+
+copy_slow_3:
+	MOVB (R11), R12
+	MOVB R12, (BX)
+	INCQ R11
+	INCQ BX
+	DECQ R13
+	JNZ copy_slow_3
+
+handle_loop:
+	ADDQ $0x18, AX
+	INCQ DX
+	CMPQ DX, CX
+	JB main_loop
+
+loop_finished:
+	// Return value
+	MOVB $0x01, ret+8(FP)
+
+	// Update the context
+	MOVQ ctx+0(FP), AX
+	MOVQ DX, 24(AX)
+	MOVQ DI, 104(AX)
+	SUBQ 80(AX), SI
+	MOVQ SI, 112(AX)
+	RET
+
+error_match_off_too_big:
+	// Return value
+	MOVB $0x00, ret+8(FP)
+
+	// Update the context
+	MOVQ ctx+0(FP), AX
+	MOVQ DX, 24(AX)
+	MOVQ DI, 104(AX)
+	SUBQ 80(AX), SI
+	MOVQ SI, 112(AX)
+	RET
+
+empty_seqs:
+	// Return value
+	MOVB $0x01, ret+8(FP)
+	RET
+
+// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
+	MOVQ br+8(FP), AX
+	MOVQ 32(AX), DX
+	MOVBQZX 40(AX), BX
+	MOVQ 24(AX), SI
+	MOVQ (AX), AX
+	ADDQ SI, AX
+	MOVQ AX, (SP)
+	MOVQ ctx+16(FP), AX
+	MOVQ 72(AX), DI
+	MOVQ 80(AX), R8
+	MOVQ 88(AX), R9
+	XORQ CX, CX
+	MOVQ CX, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ CX, 24(SP)
+	MOVQ 112(AX), R10
+	MOVQ 128(AX), CX
+	MOVQ CX, 32(SP)
+	MOVQ 144(AX), R11
+	MOVQ 136(AX), R12
+	MOVQ 200(AX), CX
+	MOVQ CX, 56(SP)
+	MOVQ 176(AX), CX
+	MOVQ CX, 48(SP)
+	MOVQ 184(AX), AX
+	MOVQ AX, 40(SP)
+	MOVQ 40(SP), AX
+	ADDQ AX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+	ADDQ R10, 32(SP)
+
+	// outBase += outPosition
+	ADDQ R12, R10
+
+sequenceDecs_decodeSync_amd64_main_loop:
+	MOVQ (SP), R13
+
+	// Fill bitreader to have enough for the offset and match length.
+	CMPQ SI, $0x08
+	JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+	MOVQ BX, AX
+	SHRQ $0x03, AX
+	SUBQ AX, R13
+	MOVQ (R13), DX
+	SUBQ AX, SI
+	ANDQ $0x07, BX
+	JMP sequenceDecs_decodeSync_amd64_fill_end
+
+sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
+	CMPQ SI, $0x00
+	JLE sequenceDecs_decodeSync_amd64_fill_end
+	CMPQ BX, $0x07
+	JLE sequenceDecs_decodeSync_amd64_fill_end
+	SHLQ $0x08, DX
+	SUBQ $0x01, R13
+	SUBQ $0x01, SI
+	SUBQ $0x08, BX
+	MOVBQZX (R13), AX
+	ORQ AX, DX
+	JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_end:
+	// Update offset
+	MOVQ R9, AX
+	MOVQ BX, CX
+	MOVQ DX, R14
+	SHLQ CL, R14
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decodeSync_amd64_of_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decodeSync_amd64_of_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decodeSync_amd64_of_update_zero
+	NEGQ CX
+	SHRQ CL, R14
+	ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_of_update_zero:
+	MOVQ AX, 8(SP)
+
+	// Update match length
+	MOVQ R8, AX
+	MOVQ BX, CX
+	MOVQ DX, R14
+	SHLQ CL, R14
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decodeSync_amd64_ml_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decodeSync_amd64_ml_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decodeSync_amd64_ml_update_zero
+	NEGQ CX
+	SHRQ CL, R14
+	ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ml_update_zero:
+	MOVQ AX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining
+	CMPQ SI, $0x08
+	JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+	MOVQ BX, AX
+	SHRQ $0x03, AX
+	SUBQ AX, R13
+	MOVQ (R13), DX
+	SUBQ AX, SI
+	ANDQ $0x07, BX
+	JMP sequenceDecs_decodeSync_amd64_fill_2_end
+
+sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
+	CMPQ SI, $0x00
+	JLE sequenceDecs_decodeSync_amd64_fill_2_end
+	CMPQ BX, $0x07
+	JLE sequenceDecs_decodeSync_amd64_fill_2_end
+	SHLQ $0x08, DX
+	SUBQ $0x01, R13
+	SUBQ $0x01, SI
+	SUBQ $0x08, BX
+	MOVBQZX (R13), AX
+	ORQ AX, DX
+	JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_2_end:
+	// Update literal length
+	MOVQ DI, AX
+	MOVQ BX, CX
+	MOVQ DX, R14
+	SHLQ CL, R14
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decodeSync_amd64_ll_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decodeSync_amd64_ll_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decodeSync_amd64_ll_update_zero
+	NEGQ CX
+	SHRQ CL, R14
+	ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ll_update_zero:
+	MOVQ AX, 24(SP)
+
+	// Fill bitreader for state updates
+	MOVQ R13, (SP)
+	MOVQ R9, AX
+	SHRQ $0x08, AX
+	MOVBQZX AL, AX
+	MOVQ ctx+16(FP), CX
+	CMPQ 96(CX), $0x00
+	JZ sequenceDecs_decodeSync_amd64_skip_update
+
+	// Update Literal Length State
+	MOVBQZX DI, R13
+	SHRQ $0x10, DI
+	MOVWQZX DI, DI
+	LEAQ (BX)(R13*1), CX
+	MOVQ DX, R14
+	MOVQ CX, BX
+	ROLQ CL, R14
+	MOVL $0x00000001, R15
+	MOVB R13, CL
+	SHLL CL, R15
+	DECL R15
+	ANDQ R15, R14
+	ADDQ R14, DI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Match Length State
+	MOVBQZX R8, R13
+	SHRQ $0x10, R8
+	MOVWQZX R8, R8
+	LEAQ (BX)(R13*1), CX
+	MOVQ DX, R14
+	MOVQ CX, BX
+	ROLQ CL, R14
+	MOVL $0x00000001, R15
+	MOVB R13, CL
+	SHLL CL, R15
+	DECL R15
+	ANDQ R15, R14
+	ADDQ R14, R8
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Offset State
+	MOVBQZX R9, R13
+	SHRQ $0x10, R9
+	MOVWQZX R9, R9
+	LEAQ (BX)(R13*1), CX
+	MOVQ DX, R14
+	MOVQ CX, BX
+	ROLQ CL, R14
+	MOVL $0x00000001, R15
+	MOVB R13, CL
+	SHLL CL, R15
+	DECL R15
+	ANDQ R15, R14
+	ADDQ R14, R9
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_amd64_skip_update:
+	// Adjust offset
+	MOVQ s+0(FP), CX
+	MOVQ 8(SP), R13
+	CMPQ AX, $0x01
+	JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0
+	MOVUPS 144(CX), X0
+	MOVQ R13, 144(CX)
+	MOVUPS X0, 152(CX)
+	JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0:
+	CMPQ 24(SP), $0x00000000
+	JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero
+	INCQ R13
+	JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_amd64_adjust_offset_maybezero:
+	TESTQ R13, R13
+	JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+	MOVQ 144(CX), R13
+	JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
+	MOVQ R13, AX
+	XORQ R14, R14
+	MOVQ $-1, R15
+	CMPQ R13, $0x03
+	CMOVQEQ R14, AX
+	CMOVQEQ R15, R14
+	ADDQ 144(CX)(AX*8), R14
+	JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid
+	MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_amd64_adjust_temp_valid:
+	CMPQ R13, $0x01
+	JZ sequenceDecs_decodeSync_amd64_adjust_skip
+	MOVQ 152(CX), AX
+	MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_amd64_adjust_skip:
+	MOVQ 144(CX), AX
+	MOVQ AX, 152(CX)
+	MOVQ R14, 144(CX)
+	MOVQ R14, R13
+
+sequenceDecs_decodeSync_amd64_after_adjust:
+	MOVQ R13, 8(SP)
+
+	// Check values
+	MOVQ 16(SP), AX
+	MOVQ 24(SP), CX
+	LEAQ (AX)(CX*1), R14
+	MOVQ s+0(FP), R15
+	ADDQ R14, 256(R15)
+	MOVQ ctx+16(FP), R14
+	SUBQ CX, 104(R14)
+	JS error_not_enough_literals
+	CMPQ AX, $0x00020002
+	JA sequenceDecs_decodeSync_amd64_error_match_len_too_big
+	TESTQ R13, R13
+	JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok
+	TESTQ AX, AX
+	JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
+	MOVQ 24(SP), AX
+	MOVQ 8(SP), CX
+	MOVQ 16(SP), R13
+
+	// Check if we have enough space in s.out
+	LEAQ (AX)(R13*1), R14
+	ADDQ R10, R14
+	CMPQ R14, 32(SP)
+	JA error_not_enough_space
+
+	// Copy literals
+	TESTQ AX, AX
+	JZ check_offset
+	XORQ R14, R14
+
+copy_1:
+	MOVUPS (R11)(R14*1), X0
+	MOVUPS X0, (R10)(R14*1)
+	ADDQ $0x10, R14
+	CMPQ R14, AX
+	JB copy_1
+	ADDQ AX, R11
+	ADDQ AX, R10
+	ADDQ AX, R12
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+	MOVQ R12, AX
+	ADDQ 40(SP), AX
+	CMPQ CX, AX
+	JG error_match_off_too_big
+	CMPQ CX, 56(SP)
+	JG error_match_off_too_big
+
+	// Copy match from history
+	MOVQ CX, AX
+	SUBQ R12, AX
+	JLS copy_match
+	MOVQ 48(SP), R14
+	SUBQ AX, R14
+	CMPQ R13, AX
+	JG copy_all_from_history
+	MOVQ R13, AX
+	SUBQ $0x10, AX
+	JB copy_4_small
+
+copy_4_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R10)
+	ADDQ $0x10, R14
+	ADDQ $0x10, R10
+	SUBQ $0x10, AX
+	JAE copy_4_loop
+	LEAQ 16(R14)(AX*1), R14
+	LEAQ 16(R10)(AX*1), R10
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R10)
+	JMP copy_4_end
+
+copy_4_small:
+	CMPQ R13, $0x03
+	JE copy_4_move_3
+	CMPQ R13, $0x08
+	JB copy_4_move_4through7
+	JMP copy_4_move_8through16
+
+copy_4_move_3:
+	MOVW (R14), AX
+	MOVB 2(R14), CL
+	MOVW AX, (R10)
+	MOVB CL, 2(R10)
+	ADDQ R13, R14
+	ADDQ R13, R10
+	JMP copy_4_end
+
+copy_4_move_4through7:
+	MOVL (R14), AX
+	MOVL -4(R14)(R13*1), CX
+	MOVL AX, (R10)
+	MOVL CX, -4(R10)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R10
+	JMP copy_4_end
+
+copy_4_move_8through16:
+	MOVQ (R14), AX
+	MOVQ -8(R14)(R13*1), CX
+	MOVQ AX, (R10)
+	MOVQ CX, -8(R10)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R10
+
+copy_4_end:
+	ADDQ R13, R12
+	JMP handle_loop
+	JMP loop_finished
+
+copy_all_from_history:
+	MOVQ AX, R15
+	SUBQ $0x10, R15
+	JB copy_5_small
+
+copy_5_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R10)
+	ADDQ $0x10, R14
+	ADDQ $0x10, R10
+	SUBQ $0x10, R15
+	JAE copy_5_loop
+	LEAQ 16(R14)(R15*1), R14
+	LEAQ 16(R10)(R15*1), R10
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R10)
+	JMP copy_5_end
+
+copy_5_small:
+	CMPQ AX, $0x03
+	JE copy_5_move_3
+	JB copy_5_move_1or2
+	CMPQ AX, $0x08
+	JB copy_5_move_4through7
+	JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+	MOVB (R14), R15
+	MOVB -1(R14)(AX*1), BP
+	MOVB R15, (R10)
+	MOVB BP, -1(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP copy_5_end
+
+copy_5_move_3:
+	MOVW (R14), R15
+	MOVB 2(R14), BP
+	MOVW R15, (R10)
+	MOVB BP, 2(R10)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP copy_5_end
+
+copy_5_move_4through7:
+	MOVL (R14), R15
+	MOVL -4(R14)(AX*1), BP
+	MOVL R15, (R10)
+	MOVL BP, -4(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP copy_5_end
+
+copy_5_move_8through16:
+	MOVQ (R14), R15
+	MOVQ -8(R14)(AX*1), BP
+	MOVQ R15, (R10)
+	MOVQ BP, -8(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+
+copy_5_end:
+	ADDQ AX, R12
+	SUBQ AX, R13
+
+	// Copy match from the current buffer
+copy_match:
+	MOVQ R10, AX
+	SUBQ CX, AX
+
+	// ml <= mo
+	CMPQ R13, CX
+	JA copy_overlapping_match
+
+	// Copy non-overlapping match
+	ADDQ R13, R12
+	MOVQ R10, CX
+	ADDQ R13, R10
+
+copy_2:
+	MOVUPS (AX), X0
+	MOVUPS X0, (CX)
+	ADDQ $0x10, AX
+	ADDQ $0x10, CX
+	SUBQ $0x10, R13
+	JHI copy_2
+	JMP handle_loop
+
+	// Copy overlapping match
+copy_overlapping_match:
+	ADDQ R13, R12
+
+copy_slow_3:
+	MOVB (AX), CL
+	MOVB CL, (R10)
+	INCQ AX
+	INCQ R10
+	DECQ R13
+	JNZ copy_slow_3
+
+handle_loop:
+	MOVQ ctx+16(FP), AX
+	DECQ 96(AX)
+	JNS sequenceDecs_decodeSync_amd64_main_loop
+
+loop_finished:
+	MOVQ br+8(FP), AX
+	MOVQ DX, 32(AX)
+	MOVB BL, 40(AX)
+	MOVQ SI, 24(AX)
+
+	// Update the context
+	MOVQ ctx+16(FP), AX
+	MOVQ R12, 136(AX)
+	MOVQ 144(AX), CX
+	SUBQ CX, R11
+	MOVQ R11, 168(AX)
+
+	// Return success
+	MOVQ $0x00000000, ret+24(FP)
+	RET
+
+	// Return with match length error
+sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch:
+	MOVQ 16(SP), AX
+	MOVQ ctx+16(FP), CX
+	MOVQ AX, 216(CX)
+	MOVQ $0x00000001, ret+24(FP)
+	RET
+
+	// Return with match too long error
+sequenceDecs_decodeSync_amd64_error_match_len_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ $0x00000002, ret+24(FP)
+	RET
+
+	// Return with match offset too long error
+error_match_off_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 8(SP), CX
+	MOVQ CX, 224(AX)
+	MOVQ R12, 136(AX)
+	MOVQ $0x00000003, ret+24(FP)
+	RET
+
+	// Return with not enough literals error
+error_not_enough_literals:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ $0x00000004, ret+24(FP)
+	RET
+
+	// Return with not enough output space error
+error_not_enough_space:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ R12, 136(AX)
+	MOVQ $0x00000005, ret+24(FP)
+	RET
+
+// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
+	MOVQ br+8(FP), CX
+	MOVQ 32(CX), AX
+	MOVBQZX 40(CX), DX
+	MOVQ 24(CX), BX
+	MOVQ (CX), CX
+	ADDQ BX, CX
+	MOVQ CX, (SP)
+	MOVQ ctx+16(FP), CX
+	MOVQ 72(CX), SI
+	MOVQ 80(CX), DI
+	MOVQ 88(CX), R8
+	XORQ R9, R9
+	MOVQ R9, 8(SP)
+	MOVQ R9, 16(SP)
+	MOVQ R9, 24(SP)
+	MOVQ 112(CX), R9
+	MOVQ 128(CX), R10
+	MOVQ R10, 32(SP)
+	MOVQ 144(CX), R10
+	MOVQ 136(CX), R11
+	MOVQ 200(CX), R12
+	MOVQ R12, 56(SP)
+	MOVQ 176(CX), R12
+	MOVQ R12, 48(SP)
+	MOVQ 184(CX), CX
+	MOVQ CX, 40(SP)
+	MOVQ 40(SP), CX
+	ADDQ CX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+	ADDQ R9, 32(SP)
+
+	// outBase += outPosition
+	ADDQ R11, R9
+
+sequenceDecs_decodeSync_bmi2_main_loop:
+	MOVQ (SP), R12
+
+	// Fill bitreader to have enough for the offset and match length.
+	CMPQ BX, $0x08
+	JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R12
+	MOVQ (R12), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP sequenceDecs_decodeSync_bmi2_fill_end
+
+sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
+	CMPQ BX, $0x00
+	JLE sequenceDecs_decodeSync_bmi2_fill_end
+	CMPQ DX, $0x07
+	JLE sequenceDecs_decodeSync_bmi2_fill_end
+	SHLQ $0x08, AX
+	SUBQ $0x01, R12
+	SUBQ $0x01, BX
+	SUBQ $0x08, DX
+	MOVBQZX (R12), CX
+	ORQ CX, AX
+	JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_end:
+	// Update offset
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, R8, R13
+	MOVQ AX, R14
+	LEAQ (DX)(R13*1), CX
+	ROLQ CL, R14
+	BZHIQ R13, R14, R14
+	MOVQ CX, DX
+	MOVQ R8, CX
+	SHRQ $0x20, CX
+	ADDQ R14, CX
+	MOVQ CX, 8(SP)
+
+	// Update match length
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, DI, R13
+	MOVQ AX, R14
+	LEAQ (DX)(R13*1), CX
+	ROLQ CL, R14
+	BZHIQ R13, R14, R14
+	MOVQ CX, DX
+	MOVQ DI, CX
+	SHRQ $0x20, CX
+	ADDQ R14, CX
+	MOVQ CX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining
+	CMPQ BX, $0x08
+	JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R12
+	MOVQ (R12), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP sequenceDecs_decodeSync_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
+	CMPQ BX, $0x00
+	JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+	CMPQ DX, $0x07
+	JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+	SHLQ $0x08, AX
+	SUBQ $0x01, R12
+	SUBQ $0x01, BX
+	SUBQ $0x08, DX
+	MOVBQZX (R12), CX
+	ORQ CX, AX
+	JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_2_end:
+	// Update literal length
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, SI, R13
+	MOVQ AX, R14
+	LEAQ (DX)(R13*1), CX
+	ROLQ CL, R14
+	BZHIQ R13, R14, R14
+	MOVQ CX, DX
+	MOVQ SI, CX
+	SHRQ $0x20, CX
+	ADDQ R14, CX
+	MOVQ CX, 24(SP)
+
+	// Fill bitreader for state updates
+	MOVQ R12, (SP)
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, R8, R12
+	MOVQ ctx+16(FP), CX
+	CMPQ 96(CX), $0x00
+	JZ sequenceDecs_decodeSync_bmi2_skip_update
+	LEAQ (SI)(DI*1), R13
+	ADDQ R8, R13
+	MOVBQZX R13, R13
+	LEAQ (DX)(R13*1), CX
+	MOVQ AX, R14
+	MOVQ CX, DX
+	ROLQ CL, R14
+	BZHIQ R13, R14, R14
+
+	// Update Offset State
+	BZHIQ R8, R14, CX
+	SHRXQ R8, R14, R14
+	MOVQ $0x00001010, R13
+	BEXTRQ R13, R8, R8
+	ADDQ CX, R8
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Match Length State
+	BZHIQ DI, R14, CX
+	SHRXQ DI, R14, R14
+	MOVQ $0x00001010, R13
+	BEXTRQ R13, DI, DI
+	ADDQ CX, DI
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Literal Length State
+	BZHIQ SI, R14, CX
+	MOVQ $0x00001010, R13
+	BEXTRQ R13, SI, SI
+	ADDQ CX, SI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_bmi2_skip_update:
+	// Adjust offset
+	MOVQ s+0(FP), CX
+	MOVQ 8(SP), R13
+	CMPQ R12, $0x01
+	JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0
+	MOVUPS 144(CX), X0
+	MOVQ R13, 144(CX)
+	MOVUPS X0, 152(CX)
+	JMP sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0:
+	CMPQ 24(SP), $0x00000000
+	JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero
+	INCQ R13
+	JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero:
+	TESTQ R13, R13
+	JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+	MOVQ 144(CX), R13
+	JMP sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
+	MOVQ R13, R12
+	XORQ R14, R14
+	MOVQ $-1, R15
+	CMPQ R13, $0x03
+	CMOVQEQ R14, R12
+	CMOVQEQ R15, R14
+	ADDQ 144(CX)(R12*8), R14
+	JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid
+	MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_bmi2_adjust_temp_valid:
+	CMPQ R13, $0x01
+	JZ sequenceDecs_decodeSync_bmi2_adjust_skip
+	MOVQ 152(CX), R12
+	MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_bmi2_adjust_skip:
+	MOVQ 144(CX), R12
+	MOVQ R12, 152(CX)
+	MOVQ R14, 144(CX)
+	MOVQ R14, R13
+
+sequenceDecs_decodeSync_bmi2_after_adjust:
+	MOVQ R13, 8(SP)
+
+	// Check values
+	MOVQ 16(SP), CX
+	MOVQ 24(SP), R12
+	LEAQ (CX)(R12*1), R14
+	MOVQ s+0(FP), R15
+	ADDQ R14, 256(R15)
+	MOVQ ctx+16(FP), R14
+	SUBQ R12, 104(R14)
+	JS error_not_enough_literals
+	CMPQ CX, $0x00020002
+	JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big
+	TESTQ R13, R13
+	JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok
+	TESTQ CX, CX
+	JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
+	MOVQ 24(SP), CX
+	MOVQ 8(SP), R12
+	MOVQ 16(SP), R13
+
+	// Check if we have enough space in s.out
+	LEAQ (CX)(R13*1), R14
+	ADDQ R9, R14
+	CMPQ R14, 32(SP)
+	JA error_not_enough_space
+
+	// Copy literals
+	TESTQ CX, CX
+	JZ check_offset
+	XORQ R14, R14
+
+copy_1:
+	MOVUPS (R10)(R14*1), X0
+	MOVUPS X0, (R9)(R14*1)
+	ADDQ $0x10, R14
+	CMPQ R14, CX
+	JB copy_1
+	ADDQ CX, R10
+	ADDQ CX, R9
+	ADDQ CX, R11
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+	MOVQ R11, CX
+	ADDQ 40(SP), CX
+	CMPQ R12, CX
+	JG error_match_off_too_big
+	CMPQ R12, 56(SP)
+	JG error_match_off_too_big
+
+	// Copy match from history
+	MOVQ R12, CX
+	SUBQ R11, CX
+	JLS copy_match
+	MOVQ 48(SP), R14
+	SUBQ CX, R14
+	CMPQ R13, CX
+	JG copy_all_from_history
+	MOVQ R13, CX
+	SUBQ $0x10, CX
+	JB copy_4_small
+
+copy_4_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R9)
+	ADDQ $0x10, R14
+	ADDQ $0x10, R9
+	SUBQ $0x10, CX
+	JAE copy_4_loop
+	LEAQ 16(R14)(CX*1), R14
+	LEAQ 16(R9)(CX*1), R9
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R9)
+	JMP copy_4_end
+
+copy_4_small:
+	CMPQ R13, $0x03
+	JE copy_4_move_3
+	CMPQ R13, $0x08
+	JB copy_4_move_4through7
+	JMP copy_4_move_8through16
+
+copy_4_move_3:
+	MOVW (R14), CX
+	MOVB 2(R14), R12
+	MOVW CX, (R9)
+	MOVB R12, 2(R9)
+	ADDQ R13, R14
+	ADDQ R13, R9
+	JMP copy_4_end
+
+copy_4_move_4through7:
+	MOVL (R14), CX
+	MOVL -4(R14)(R13*1), R12
+	MOVL CX, (R9)
+	MOVL R12, -4(R9)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R9
+	JMP copy_4_end
+
+copy_4_move_8through16:
+	MOVQ (R14), CX
+	MOVQ -8(R14)(R13*1), R12
+	MOVQ CX, (R9)
+	MOVQ R12, -8(R9)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R9
+
+copy_4_end:
+	ADDQ R13, R11
+	JMP handle_loop
+	JMP loop_finished
+
+copy_all_from_history:
+	MOVQ CX, R15
+	SUBQ $0x10, R15
+	JB copy_5_small
+
+copy_5_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R9)
+	ADDQ $0x10, R14
+	ADDQ $0x10, R9
+	SUBQ $0x10, R15
+	JAE copy_5_loop
+	LEAQ 16(R14)(R15*1), R14
+	LEAQ 16(R9)(R15*1), R9
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R9)
+	JMP copy_5_end
+
+copy_5_small:
+	CMPQ CX, $0x03
+	JE copy_5_move_3
+	JB copy_5_move_1or2
+	CMPQ CX, $0x08
+	JB copy_5_move_4through7
+	JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+	MOVB (R14), R15
+	MOVB -1(R14)(CX*1), BP
+	MOVB R15, (R9)
+	MOVB BP, -1(R9)(CX*1)
+	ADDQ CX, R14
+	ADDQ CX, R9
+	JMP copy_5_end
+
+copy_5_move_3:
+	MOVW (R14), R15
+	MOVB 2(R14), BP
+	MOVW R15, (R9)
+	MOVB BP, 2(R9)
+	ADDQ CX, R14
+	ADDQ CX, R9
+	JMP copy_5_end
+
+copy_5_move_4through7:
+	MOVL (R14), R15
+	MOVL -4(R14)(CX*1), BP
+	MOVL R15, (R9)
+	MOVL BP, -4(R9)(CX*1)
+	ADDQ CX, R14
+	ADDQ CX, R9
+	JMP copy_5_end
+
+copy_5_move_8through16:
+	MOVQ (R14), R15
+	MOVQ -8(R14)(CX*1), BP
+	MOVQ R15, (R9)
+	MOVQ BP, -8(R9)(CX*1)
+	ADDQ CX, R14
+	ADDQ CX, R9
+
+copy_5_end:
+	ADDQ CX, R11
+	SUBQ CX, R13
+
+	// Copy match from the current buffer
+copy_match:
+	MOVQ R9, CX
+	SUBQ R12, CX
+
+	// ml <= mo
+	CMPQ R13, R12
+	JA copy_overlapping_match
+
+	// Copy non-overlapping match
+	ADDQ R13, R11
+	MOVQ R9, R12
+	ADDQ R13, R9
+
+copy_2:
+	MOVUPS (CX), X0
+	MOVUPS X0, (R12)
+	ADDQ $0x10, CX
+	ADDQ $0x10, R12
+	SUBQ $0x10, R13
+	JHI copy_2
+	JMP handle_loop
+
+	// Copy overlapping match
+copy_overlapping_match:
+	ADDQ R13, R11
+
+copy_slow_3:
+	MOVB (CX), R12
+	MOVB R12, (R9)
+	INCQ CX
+	INCQ R9
+	DECQ R13
+	JNZ copy_slow_3
+
+handle_loop:
+	MOVQ ctx+16(FP), CX
+	DECQ 96(CX)
+	JNS sequenceDecs_decodeSync_bmi2_main_loop
+
+loop_finished:
+	MOVQ br+8(FP), CX
+	MOVQ AX, 32(CX)
+	MOVB DL, 40(CX)
+	MOVQ BX, 24(CX)
+
+	// Update the context
+	MOVQ ctx+16(FP), AX
+	MOVQ R11, 136(AX)
+	MOVQ 144(AX), CX
+	SUBQ CX, R10
+	MOVQ R10, 168(AX)
+
+	// Return success
+	MOVQ $0x00000000, ret+24(FP)
+	RET
+
+	// Return with match length error
+sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch:
+	MOVQ 16(SP), AX
+	MOVQ ctx+16(FP), CX
+	MOVQ AX, 216(CX)
+	MOVQ $0x00000001, ret+24(FP)
+	RET
+
+	// Return with match too long error
+sequenceDecs_decodeSync_bmi2_error_match_len_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ $0x00000002, ret+24(FP)
+	RET
+
+	// Return with match offset too long error
+error_match_off_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 8(SP), CX
+	MOVQ CX, 224(AX)
+	MOVQ R11, 136(AX)
+	MOVQ $0x00000003, ret+24(FP)
+	RET
+
+	// Return with not enough literals error
+error_not_enough_literals:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ $0x00000004, ret+24(FP)
+	RET
+
+	// Return with not enough output space error
+error_not_enough_space:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ R11, 136(AX)
+	MOVQ $0x00000005, ret+24(FP)
+	RET
+
+// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
+	MOVQ br+8(FP), AX
+	MOVQ 32(AX), DX
+	MOVBQZX 40(AX), BX
+	MOVQ 24(AX), SI
+	MOVQ (AX), AX
+	ADDQ SI, AX
+	MOVQ AX, (SP)
+	MOVQ ctx+16(FP), AX
+	MOVQ 72(AX), DI
+	MOVQ 80(AX), R8
+	MOVQ 88(AX), R9
+	XORQ CX, CX
+	MOVQ CX, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ CX, 24(SP)
+	MOVQ 112(AX), R10
+	MOVQ 128(AX), CX
+	MOVQ CX, 32(SP)
+	MOVQ 144(AX), R11
+	MOVQ 136(AX), R12
+	MOVQ 200(AX), CX
+	MOVQ CX, 56(SP)
+	MOVQ 176(AX), CX
+	MOVQ CX, 48(SP)
+	MOVQ 184(AX), AX
+	MOVQ AX, 40(SP)
+	MOVQ 40(SP), AX
+	ADDQ AX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+	ADDQ R10, 32(SP)
+
+	// outBase += outPosition
+	ADDQ R12, R10
+
+sequenceDecs_decodeSync_safe_amd64_main_loop:
+	MOVQ (SP), R13
+
+	// Fill bitreader to have enough for the offset and match length.
+	CMPQ SI, $0x08
+	JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+	MOVQ BX, AX
+	SHRQ $0x03, AX
+	SUBQ AX, R13
+	MOVQ (R13), DX
+	SUBQ AX, SI
+	ANDQ $0x07, BX
+	JMP sequenceDecs_decodeSync_safe_amd64_fill_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
+	CMPQ SI, $0x00
+	JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+	CMPQ BX, $0x07
+	JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+	SHLQ $0x08, DX
+	SUBQ $0x01, R13
+	SUBQ $0x01, SI
+	SUBQ $0x08, BX
+	MOVBQZX (R13), AX
+	ORQ AX, DX
+	JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_end:
+	// Update offset
+	MOVQ R9, AX
+	MOVQ BX, CX
+	MOVQ DX, R14
+	SHLQ CL, R14
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decodeSync_safe_amd64_of_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero
+	NEGQ CX
+	SHRQ CL, R14
+	ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_of_update_zero:
+	MOVQ AX, 8(SP)
+
+	// Update match length
+	MOVQ R8, AX
+	MOVQ BX, CX
+	MOVQ DX, R14
+	SHLQ CL, R14
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+	NEGQ CX
+	SHRQ CL, R14
+	ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
+	MOVQ AX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining
+	CMPQ SI, $0x08
+	JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+	MOVQ BX, AX
+	SHRQ $0x03, AX
+	SUBQ AX, R13
+	MOVQ (R13), DX
+	SUBQ AX, SI
+	ANDQ $0x07, BX
+	JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
+	CMPQ SI, $0x00
+	JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+	CMPQ BX, $0x07
+	JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+	SHLQ $0x08, DX
+	SUBQ $0x01, R13
+	SUBQ $0x01, SI
+	SUBQ $0x08, BX
+	MOVBQZX (R13), AX
+	ORQ AX, DX
+	JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_end:
+	// Update literal length
+	MOVQ DI, AX
+	MOVQ BX, CX
+	MOVQ DX, R14
+	SHLQ CL, R14
+	MOVB AH, CL
+	SHRQ $0x20, AX
+	TESTQ CX, CX
+	JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+	ADDQ CX, BX
+	CMPQ BX, $0x40
+	JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+	CMPQ CX, $0x40
+	JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+	NEGQ CX
+	SHRQ CL, R14
+	ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
+	MOVQ AX, 24(SP)
+
+	// Fill bitreader for state updates
+	MOVQ R13, (SP)
+	MOVQ R9, AX
+	SHRQ $0x08, AX
+	MOVBQZX AL, AX
+	MOVQ ctx+16(FP), CX
+	CMPQ 96(CX), $0x00
+	JZ sequenceDecs_decodeSync_safe_amd64_skip_update
+
+	// Update Literal Length State
+	MOVBQZX DI, R13
+	SHRQ $0x10, DI
+	MOVWQZX DI, DI
+	LEAQ (BX)(R13*1), CX
+	MOVQ DX, R14
+	MOVQ CX, BX
+	ROLQ CL, R14
+	MOVL $0x00000001, R15
+	MOVB R13, CL
+	SHLL CL, R15
+	DECL R15
+	ANDQ R15, R14
+	ADDQ R14, DI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Match Length State
+	MOVBQZX R8, R13
+	SHRQ $0x10, R8
+	MOVWQZX R8, R8
+	LEAQ (BX)(R13*1), CX
+	MOVQ DX, R14
+	MOVQ CX, BX
+	ROLQ CL, R14
+	MOVL $0x00000001, R15
+	MOVB R13, CL
+	SHLL CL, R15
+	DECL R15
+	ANDQ R15, R14
+	ADDQ R14, R8
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Offset State
+	MOVBQZX R9, R13
+	SHRQ $0x10, R9
+	MOVWQZX R9, R9
+	LEAQ (BX)(R13*1), CX
+	MOVQ DX, R14
+	MOVQ CX, BX
+	ROLQ CL, R14
+	MOVL $0x00000001, R15
+	MOVB R13, CL
+	SHLL CL, R15
+	DECL R15
+	ANDQ R15, R14
+	ADDQ R14, R9
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_safe_amd64_skip_update:
+	// Adjust offset
+	MOVQ s+0(FP), CX
+	MOVQ 8(SP), R13
+	CMPQ AX, $0x01
+	JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0
+	MOVUPS 144(CX), X0
+	MOVQ R13, 144(CX)
+	MOVUPS X0, 152(CX)
+	JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0:
+	CMPQ 24(SP), $0x00000000
+	JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero
+	INCQ R13
+	JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero:
+	TESTQ R13, R13
+	JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+	MOVQ 144(CX), R13
+	JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
+	MOVQ R13, AX
+	XORQ R14, R14
+	MOVQ $-1, R15
+	CMPQ R13, $0x03
+	CMOVQEQ R14, AX
+	CMOVQEQ R15, R14
+	ADDQ 144(CX)(AX*8), R14
+	JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid
+	MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid:
+	CMPQ R13, $0x01
+	JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip
+	MOVQ 152(CX), AX
+	MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_safe_amd64_adjust_skip:
+	MOVQ 144(CX), AX
+	MOVQ AX, 152(CX)
+	MOVQ R14, 144(CX)
+	MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_amd64_after_adjust:
+	MOVQ R13, 8(SP)
+
+	// Check values
+	MOVQ 16(SP), AX
+	MOVQ 24(SP), CX
+	LEAQ (AX)(CX*1), R14
+	MOVQ s+0(FP), R15
+	ADDQ R14, 256(R15)
+	MOVQ ctx+16(FP), R14
+	SUBQ CX, 104(R14)
+	JS error_not_enough_literals
+	CMPQ AX, $0x00020002
+	JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big
+	TESTQ R13, R13
+	JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok
+	TESTQ AX, AX
+	JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok:
+	MOVQ 24(SP), AX
+	MOVQ 8(SP), CX
+	MOVQ 16(SP), R13
+
+	// Check if we have enough space in s.out
+	LEAQ (AX)(R13*1), R14
+	ADDQ R10, R14
+	CMPQ R14, 32(SP)
+	JA error_not_enough_space
+
+	// Copy literals
+	TESTQ AX, AX
+	JZ check_offset
+	MOVQ AX, R14
+	SUBQ $0x10, R14
+	JB copy_1_small
+
+copy_1_loop:
+	MOVUPS (R11), X0
+	MOVUPS X0, (R10)
+	ADDQ $0x10, R11
+	ADDQ $0x10, R10
+	SUBQ $0x10, R14
+	JAE copy_1_loop
+	LEAQ 16(R11)(R14*1), R11
+	LEAQ 16(R10)(R14*1), R10
+	MOVUPS -16(R11), X0
+	MOVUPS X0, -16(R10)
+	JMP copy_1_end
+
+copy_1_small:
+	CMPQ AX, $0x03
+	JE copy_1_move_3
+	JB copy_1_move_1or2
+	CMPQ AX, $0x08
+	JB copy_1_move_4through7
+	JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+	MOVB (R11), R14
+	MOVB -1(R11)(AX*1), R15
+	MOVB R14, (R10)
+	MOVB R15, -1(R10)(AX*1)
+	ADDQ AX, R11
+	ADDQ AX, R10
+	JMP copy_1_end
+
+copy_1_move_3:
+	MOVW (R11), R14
+	MOVB 2(R11), R15
+	MOVW R14, (R10)
+	MOVB R15, 2(R10)
+	ADDQ AX, R11
+	ADDQ AX, R10
+	JMP copy_1_end
+
+copy_1_move_4through7:
+	MOVL (R11), R14
+	MOVL -4(R11)(AX*1), R15
+	MOVL R14, (R10)
+	MOVL R15, -4(R10)(AX*1)
+	ADDQ AX, R11
+	ADDQ AX, R10
+	JMP copy_1_end
+
+copy_1_move_8through16:
+	MOVQ (R11), R14
+	MOVQ -8(R11)(AX*1), R15
+	MOVQ R14, (R10)
+	MOVQ R15, -8(R10)(AX*1)
+	ADDQ AX, R11
+	ADDQ AX, R10
+
+copy_1_end:
+	ADDQ AX, R12
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+	MOVQ R12, AX
+	ADDQ 40(SP), AX
+	CMPQ CX, AX
+	JG error_match_off_too_big
+	CMPQ CX, 56(SP)
+	JG error_match_off_too_big
+
+	// Copy match from history
+	MOVQ CX, AX
+	SUBQ R12, AX
+	JLS copy_match
+	MOVQ 48(SP), R14
+	SUBQ AX, R14
+	CMPQ R13, AX
+	JG copy_all_from_history
+	MOVQ R13, AX
+	SUBQ $0x10, AX
+	JB copy_4_small
+
+copy_4_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R10)
+	ADDQ $0x10, R14
+	ADDQ $0x10, R10
+	SUBQ $0x10, AX
+	JAE copy_4_loop
+	LEAQ 16(R14)(AX*1), R14
+	LEAQ 16(R10)(AX*1), R10
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R10)
+	JMP copy_4_end
+
+copy_4_small:
+	CMPQ R13, $0x03
+	JE copy_4_move_3
+	CMPQ R13, $0x08
+	JB copy_4_move_4through7
+	JMP copy_4_move_8through16
+
+copy_4_move_3:
+	MOVW (R14), AX
+	MOVB 2(R14), CL
+	MOVW AX, (R10)
+	MOVB CL, 2(R10)
+	ADDQ R13, R14
+	ADDQ R13, R10
+	JMP copy_4_end
+
+copy_4_move_4through7:
+	MOVL (R14), AX
+	MOVL -4(R14)(R13*1), CX
+	MOVL AX, (R10)
+	MOVL CX, -4(R10)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R10
+	JMP copy_4_end
+
+copy_4_move_8through16:
+	MOVQ (R14), AX
+	MOVQ -8(R14)(R13*1), CX
+	MOVQ AX, (R10)
+	MOVQ CX, -8(R10)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R10
+
+copy_4_end:
+	ADDQ R13, R12
+	JMP handle_loop
+	JMP loop_finished
+
+copy_all_from_history:
+	MOVQ AX, R15
+	SUBQ $0x10, R15
+	JB copy_5_small
+
+copy_5_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R10)
+	ADDQ $0x10, R14
+	ADDQ $0x10, R10
+	SUBQ $0x10, R15
+	JAE copy_5_loop
+	LEAQ 16(R14)(R15*1), R14
+	LEAQ 16(R10)(R15*1), R10
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R10)
+	JMP copy_5_end
+
+copy_5_small:
+	CMPQ AX, $0x03
+	JE copy_5_move_3
+	JB copy_5_move_1or2
+	CMPQ AX, $0x08
+	JB copy_5_move_4through7
+	JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+	MOVB (R14), R15
+	MOVB -1(R14)(AX*1), BP
+	MOVB R15, (R10)
+	MOVB BP, -1(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP copy_5_end
+
+copy_5_move_3:
+	MOVW (R14), R15
+	MOVB 2(R14), BP
+	MOVW R15, (R10)
+	MOVB BP, 2(R10)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP copy_5_end
+
+copy_5_move_4through7:
+	MOVL (R14), R15
+	MOVL -4(R14)(AX*1), BP
+	MOVL R15, (R10)
+	MOVL BP, -4(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP copy_5_end
+
+copy_5_move_8through16:
+	MOVQ (R14), R15
+	MOVQ -8(R14)(AX*1), BP
+	MOVQ R15, (R10)
+	MOVQ BP, -8(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+
+copy_5_end:
+	ADDQ AX, R12
+	SUBQ AX, R13
+
+	// Copy match from the current buffer
+copy_match:
+	MOVQ R10, AX
+	SUBQ CX, AX
+
+	// ml <= mo
+	CMPQ R13, CX
+	JA copy_overlapping_match
+
+	// Copy non-overlapping match
+	ADDQ R13, R12
+	MOVQ R13, CX
+	SUBQ $0x10, CX
+	JB copy_2_small
+
+copy_2_loop:
+	MOVUPS (AX), X0
+	MOVUPS X0, (R10)
+	ADDQ $0x10, AX
+	ADDQ $0x10, R10
+	SUBQ $0x10, CX
+	JAE copy_2_loop
+	LEAQ 16(AX)(CX*1), AX
+	LEAQ 16(R10)(CX*1), R10
+	MOVUPS -16(AX), X0
+	MOVUPS X0, -16(R10)
+	JMP copy_2_end
+
+copy_2_small:
+	CMPQ R13, $0x03
+	JE copy_2_move_3
+	JB copy_2_move_1or2
+	CMPQ R13, $0x08
+	JB copy_2_move_4through7
+	JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+	MOVB (AX), CL
+	MOVB -1(AX)(R13*1), R14
+	MOVB CL, (R10)
+	MOVB R14, -1(R10)(R13*1)
+	ADDQ R13, AX
+	ADDQ R13, R10
+	JMP copy_2_end
+
+copy_2_move_3:
+	MOVW (AX), CX
+	MOVB 2(AX), R14
+	MOVW CX, (R10)
+	MOVB R14, 2(R10)
+	ADDQ R13, AX
+	ADDQ R13, R10
+	JMP copy_2_end
+
+copy_2_move_4through7:
+	MOVL (AX), CX
+	MOVL -4(AX)(R13*1), R14
+	MOVL CX, (R10)
+	MOVL R14, -4(R10)(R13*1)
+	ADDQ R13, AX
+	ADDQ R13, R10
+	JMP copy_2_end
+
+copy_2_move_8through16:
+	MOVQ (AX), CX
+	MOVQ -8(AX)(R13*1), R14
+	MOVQ CX, (R10)
+	MOVQ R14, -8(R10)(R13*1)
+	ADDQ R13, AX
+	ADDQ R13, R10
+
+copy_2_end:
+	JMP handle_loop
+
+	// Copy overlapping match
+copy_overlapping_match:
+	ADDQ R13, R12
+
+copy_slow_3:
+	MOVB (AX), CL
+	MOVB CL, (R10)
+	INCQ AX
+	INCQ R10
+	DECQ R13
+	JNZ copy_slow_3
+
+handle_loop:
+	MOVQ ctx+16(FP), AX
+	DECQ 96(AX)
+	JNS sequenceDecs_decodeSync_safe_amd64_main_loop
+
+loop_finished:
+	MOVQ br+8(FP), AX
+	MOVQ DX, 32(AX)
+	MOVB BL, 40(AX)
+	MOVQ SI, 24(AX)
+
+	// Update the context
+	MOVQ ctx+16(FP), AX
+	MOVQ R12, 136(AX)
+	MOVQ 144(AX), CX
+	SUBQ CX, R11
+	MOVQ R11, 168(AX)
+
+	// Return success
+	MOVQ $0x00000000, ret+24(FP)
+	RET
+
+	// Return with match length error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch:
+	MOVQ 16(SP), AX
+	MOVQ ctx+16(FP), CX
+	MOVQ AX, 216(CX)
+	MOVQ $0x00000001, ret+24(FP)
+	RET
+
+	// Return with match too long error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ $0x00000002, ret+24(FP)
+	RET
+
+	// Return with match offset too long error
+error_match_off_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 8(SP), CX
+	MOVQ CX, 224(AX)
+	MOVQ R12, 136(AX)
+	MOVQ $0x00000003, ret+24(FP)
+	RET
+
+	// Return with not enough literals error
+error_not_enough_literals:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ $0x00000004, ret+24(FP)
+	RET
+
+	// Return with not enough output space error
+error_not_enough_space:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ R12, 136(AX)
+	MOVQ $0x00000005, ret+24(FP)
+	RET
+
+// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
+	MOVQ br+8(FP), CX
+	MOVQ 32(CX), AX
+	MOVBQZX 40(CX), DX
+	MOVQ 24(CX), BX
+	MOVQ (CX), CX
+	ADDQ BX, CX
+	MOVQ CX, (SP)
+	MOVQ ctx+16(FP), CX
+	MOVQ 72(CX), SI
+	MOVQ 80(CX), DI
+	MOVQ 88(CX), R8
+	XORQ R9, R9
+	MOVQ R9, 8(SP)
+	MOVQ R9, 16(SP)
+	MOVQ R9, 24(SP)
+	MOVQ 112(CX), R9
+	MOVQ 128(CX), R10
+	MOVQ R10, 32(SP)
+	MOVQ 144(CX), R10
+	MOVQ 136(CX), R11
+	MOVQ 200(CX), R12
+	MOVQ R12, 56(SP)
+	MOVQ 176(CX), R12
+	MOVQ R12, 48(SP)
+	MOVQ 184(CX), CX
+	MOVQ CX, 40(SP)
+	MOVQ 40(SP), CX
+	ADDQ CX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+	ADDQ R9, 32(SP)
+
+	// outBase += outPosition
+	ADDQ R11, R9
+
+sequenceDecs_decodeSync_safe_bmi2_main_loop:
+	MOVQ (SP), R12
+
+	// Fill bitreader to have enough for the offset and match length.
+	CMPQ BX, $0x08
+	JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R12
+	MOVQ (R12), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP sequenceDecs_decodeSync_safe_bmi2_fill_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
+	CMPQ BX, $0x00
+	JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+	CMPQ DX, $0x07
+	JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+	SHLQ $0x08, AX
+	SUBQ $0x01, R12
+	SUBQ $0x01, BX
+	SUBQ $0x08, DX
+	MOVBQZX (R12), CX
+	ORQ CX, AX
+	JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_end:
+	// Update offset
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, R8, R13
+	MOVQ AX, R14
+	LEAQ (DX)(R13*1), CX
+	ROLQ CL, R14
+	BZHIQ R13, R14, R14
+	MOVQ CX, DX
+	MOVQ R8, CX
+	SHRQ $0x20, CX
+	ADDQ R14, CX
+	MOVQ CX, 8(SP)
+
+	// Update match length
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, DI, R13
+	MOVQ AX, R14
+	LEAQ (DX)(R13*1), CX
+	ROLQ CL, R14
+	BZHIQ R13, R14, R14
+	MOVQ CX, DX
+	MOVQ DI, CX
+	SHRQ $0x20, CX
+	ADDQ R14, CX
+	MOVQ CX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining
+	CMPQ BX, $0x08
+	JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R12
+	MOVQ (R12), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
+	CMPQ BX, $0x00
+	JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+	CMPQ DX, $0x07
+	JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+	SHLQ $0x08, AX
+	SUBQ $0x01, R12
+	SUBQ $0x01, BX
+	SUBQ $0x08, DX
+	MOVBQZX (R12), CX
+	ORQ CX, AX
+	JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
+	// Update literal length
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, SI, R13
+	MOVQ AX, R14
+	LEAQ (DX)(R13*1), CX
+	ROLQ CL, R14
+	BZHIQ R13, R14, R14
+	MOVQ CX, DX
+	MOVQ SI, CX
+	SHRQ $0x20, CX
+	ADDQ R14, CX
+	MOVQ CX, 24(SP)
+
+	// Fill bitreader for state updates
+	MOVQ R12, (SP)
+	MOVQ $0x00000808, CX
+	BEXTRQ CX, R8, R12
+	MOVQ ctx+16(FP), CX
+	CMPQ 96(CX), $0x00
+	JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
+	LEAQ (SI)(DI*1), R13
+	ADDQ R8, R13
+	MOVBQZX R13, R13
+	LEAQ (DX)(R13*1), CX
+	MOVQ AX, R14
+	MOVQ CX, DX
+	ROLQ CL, R14
+	BZHIQ R13, R14, R14
+
+	// Update Offset State
+	BZHIQ R8, R14, CX
+	SHRXQ R8, R14, R14
+	MOVQ $0x00001010, R13
+	BEXTRQ R13, R8, R8
+	ADDQ CX, R8
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Match Length State
+	BZHIQ DI, R14, CX
+	SHRXQ DI, R14, R14
+	MOVQ $0x00001010, R13
+	BEXTRQ R13, DI, DI
+	ADDQ CX, DI
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Literal Length State
+	BZHIQ SI, R14, CX
+	MOVQ $0x00001010, R13
+	BEXTRQ R13, SI, SI
+	ADDQ CX, SI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_safe_bmi2_skip_update:
+	// Adjust offset
+	MOVQ s+0(FP), CX
+	MOVQ 8(SP), R13
+	CMPQ R12, $0x01
+	JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0
+	MOVUPS 144(CX), X0
+	MOVQ R13, 144(CX)
+	MOVUPS X0, 152(CX)
+	JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0:
+	CMPQ 24(SP), $0x00000000
+	JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero
+	INCQ R13
+	JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero:
+	TESTQ R13, R13
+	JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+	MOVQ 144(CX), R13
+	JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
+	MOVQ R13, R12
+	XORQ R14, R14
+	MOVQ $-1, R15
+	CMPQ R13, $0x03
+	CMOVQEQ R14, R12
+	CMOVQEQ R15, R14
+	ADDQ 144(CX)(R12*8), R14
+	JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid
+	MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid:
+	CMPQ R13, $0x01
+	JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip
+	MOVQ 152(CX), R12
+	MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_skip:
+	MOVQ 144(CX), R12
+	MOVQ R12, 152(CX)
+	MOVQ R14, 144(CX)
+	MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_bmi2_after_adjust:
+	MOVQ R13, 8(SP)
+
+	// Check values
+	MOVQ 16(SP), CX
+	MOVQ 24(SP), R12
+	LEAQ (CX)(R12*1), R14
+	MOVQ s+0(FP), R15
+	ADDQ R14, 256(R15)
+	MOVQ ctx+16(FP), R14
+	SUBQ R12, 104(R14)
+	JS error_not_enough_literals
+	CMPQ CX, $0x00020002
+	JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big
+	TESTQ R13, R13
+	JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok
+	TESTQ CX, CX
+	JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok:
+	MOVQ 24(SP), CX
+	MOVQ 8(SP), R12
+	MOVQ 16(SP), R13
+
+	// Check if we have enough space in s.out
+	LEAQ (CX)(R13*1), R14
+	ADDQ R9, R14
+	CMPQ R14, 32(SP)
+	JA error_not_enough_space
+
+	// Copy literals
+	TESTQ CX, CX
+	JZ check_offset
+	MOVQ CX, R14
+	SUBQ $0x10, R14
+	JB copy_1_small
+
+copy_1_loop:
+	MOVUPS (R10), X0
+	MOVUPS X0, (R9)
+	ADDQ $0x10, R10
+	ADDQ $0x10, R9
+	SUBQ $0x10, R14
+	JAE copy_1_loop
+	LEAQ 16(R10)(R14*1), R10
+	LEAQ 16(R9)(R14*1), R9
+	MOVUPS -16(R10), X0
+	MOVUPS X0, -16(R9)
+	JMP copy_1_end
+
+copy_1_small:
+	CMPQ CX, $0x03
+	JE copy_1_move_3
+	JB copy_1_move_1or2
+	CMPQ CX, $0x08
+	JB copy_1_move_4through7
+	JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+	MOVB (R10), R14
+	MOVB -1(R10)(CX*1), R15
+	MOVB R14, (R9)
+	MOVB R15, -1(R9)(CX*1)
+	ADDQ CX, R10
+	ADDQ CX, R9
+	JMP copy_1_end
+
+copy_1_move_3:
+	MOVW (R10), R14
+	MOVB 2(R10), R15
+	MOVW R14, (R9)
+	MOVB R15, 2(R9)
+	ADDQ CX, R10
+	ADDQ CX, R9
+	JMP copy_1_end
+
+copy_1_move_4through7:
+	MOVL (R10), R14
+	MOVL -4(R10)(CX*1), R15
+	MOVL R14, (R9)
+	MOVL R15, -4(R9)(CX*1)
+	ADDQ CX, R10
+	ADDQ CX, R9
+	JMP copy_1_end
+
+copy_1_move_8through16:
+	MOVQ (R10), R14
+	MOVQ -8(R10)(CX*1), R15
+	MOVQ R14, (R9)
+	MOVQ R15, -8(R9)(CX*1)
+	ADDQ CX, R10
+	ADDQ CX, R9
+
+copy_1_end:
+	ADDQ CX, R11
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+	MOVQ R11, CX
+	ADDQ 40(SP), CX
+	CMPQ R12, CX
+	JG error_match_off_too_big
+	CMPQ R12, 56(SP)
+	JG error_match_off_too_big
+
+	// Copy match from history
+	MOVQ R12, CX
+	SUBQ R11, CX
+	JLS copy_match
+	MOVQ 48(SP), R14
+	SUBQ CX, R14
+	CMPQ R13, CX
+	JG copy_all_from_history
+	MOVQ R13, CX
+	SUBQ $0x10, CX
+	JB copy_4_small
+
+copy_4_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R9)
+	ADDQ $0x10, R14
+	ADDQ $0x10, R9
+	SUBQ $0x10, CX
+	JAE copy_4_loop
+	LEAQ 16(R14)(CX*1), R14
+	LEAQ 16(R9)(CX*1), R9
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R9)
+	JMP copy_4_end
+
+copy_4_small:
+	CMPQ R13, $0x03
+	JE copy_4_move_3
+	CMPQ R13, $0x08
+	JB copy_4_move_4through7
+	JMP copy_4_move_8through16
+
+copy_4_move_3:
+	MOVW (R14), CX
+	MOVB 2(R14), R12
+	MOVW CX, (R9)
+	MOVB R12, 2(R9)
+	ADDQ R13, R14
+	ADDQ R13, R9
+	JMP copy_4_end
+
+copy_4_move_4through7:
+	MOVL (R14), CX
+	MOVL -4(R14)(R13*1), R12
+	MOVL CX, (R9)
+
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 0000000..ac2a80d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
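				// Illustration with hypothetical state prevOffset = [8, 4, 2]
				// and ll == 0: mo == 0 becomes 1 and resolves to
				// prevOffset[1] == 4 (Repeated_Offset2), mo == 1 becomes 2
				// and resolves to prevOffset[2] == 2 (Repeated_Offset3), and
				// mo == 2 becomes 3 and resolves to prevOffset[0]-1 == 7.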
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
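					// Hypothetical numbers: with t == 10, seq.mo == 30 and
					// seq.ml == 25, v == 20 bytes are taken from the end of
					// hist here and the remaining 5 bytes are copied from the
					// current output buffer below.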
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 967f29b..29c15c8 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -18,36 +18,58 @@ const ZipMethodWinZip = 93 // See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT const ZipMethodPKWare = 20 -var zipReaderPool sync.Pool +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} -// newZipReader cannot be used since we would leak goroutines... -func newZipReader(r io.Reader) io.ReadCloser { - dec, ok := zipReaderPool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d } - dec = d + return &pooledZipReader{dec: dec, pool: pool} } - return &pooledZipReader{dec: dec} } type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - dec *Decoder + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder } func (r *pooledZipReader) Read(p []byte) (n int, err error) { r.mu.Lock() defer r.mu.Unlock() if r.dec == nil { - return 0, errors.New("Read after Close") + return 0, errors.New("read after close or EOF") } dec, err := r.dec.Read(p) - + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } return dec, err } @@ -57,7 +79,7 @@ func (r *pooledZipReader) Close() error { var err error if r.dec != nil { err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) + r.pool.Put(r.dec) r.dec = nil } return err @@ -111,12 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { // ZipDecompressor returns a decompressor that can be registered with zip libraries. // See ZipCompressor for example. 
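// For context, the pooled decompressor returned below is typically
// registered with archive/zip. A minimal sketch (the archive name and file
// index are illustrative):
//
//	zr, err := zip.OpenReader("archive.zip")
//	if err != nil { /* handle error */ }
//	defer zr.Close()
//	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
//	rc, err := zr.File[0].Open()
//	if err != nil { /* handle error */ }
//	defer rc.Close()
//	_, _ = io.Copy(io.Discard, rc)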
-func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return func(r io.Reader) io.ReadCloser { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - return d.IOReadCloser() - } +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) } diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index ef1d49a..5ffa82f 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -36,8 +36,8 @@ const forcePreDef = false // zstdMinMatch is the minimum zstd match length. const zstdMinMatch = 3 -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 var ( // ErrReservedBlockType is returned when a reserved block type is found. @@ -52,6 +52,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -68,13 +72,16 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") @@ -99,37 +106,25 @@ func printf(format string, a ...interface{}) { } } -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - -// matchLen returns the maximum length. +// matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. -// The function also returns whether all bytes matched. 
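// The rewritten matchLen below compares 8 bytes at a time: the XOR of two
// equal words is zero, and at the first difference the number of matching
// bytes is bits.TrailingZeros64(diff) >> 3, since the words are read
// little-endian. Worked example with hypothetical input: for a = "abcdefgh"
// and b = "abcxefgh" the XOR is first nonzero in byte 3, TrailingZeros64
// returns 26 (three zero bytes plus two zero bits of 'd'^'x' == 0x1c), and
// 26 >> 3 == 3 bytes match.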
-func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 } + n += 8 } - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] for i := range a { if a[i] != b[i] { - return i + checked + break } + n++ } - return len(a) + checked + return n + } func load3232(b []byte, i int32) uint32 { @@ -140,10 +135,6 @@ func load6432(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) } -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - type byter interface { Bytes() []byte Len() int diff --git a/vendor/github.com/lufia/plan9stats/README.md b/vendor/github.com/lufia/plan9stats/README.md index a21700c..04bdcef 100644 --- a/vendor/github.com/lufia/plan9stats/README.md +++ b/vendor/github.com/lufia/plan9stats/README.md @@ -1,2 +1,13 @@ # plan9stats A module for retrieving statistics of Plan 9 + +[![GoDev][godev-image]][godev-url] +[![Actions Status][actions-image]][actions-url] +[![Coverage Status][coveralls-image]][coveralls-url] + +[godev-image]: https://pkg.go.dev/badge/github.com/lufia/plan9stats +[godev-url]: https://pkg.go.dev/github.com/lufia/plan9stats +[actions-image]: https://github.com/lufia/plan9stats/workflows/Test/badge.svg?branch=main +[actions-url]: https://github.com/lufia/plan9stats/actions?workflow=Test +[coveralls-image]: https://coveralls.io/repos/github/lufia/plan9stats/badge.svg +[coveralls-url]: https://coveralls.io/github/lufia/plan9stats diff --git a/vendor/github.com/lufia/plan9stats/cpu.go b/vendor/github.com/lufia/plan9stats/cpu.go index a101b91..eaff362 100644 --- a/vendor/github.com/lufia/plan9stats/cpu.go +++ b/vendor/github.com/lufia/plan9stats/cpu.go @@ -178,9 +178,12 @@ func ReadCPUStats(ctx context.Context, opts ...Option) (*CPUStats, error) { var up uint32parser pids := make([]uint32, len(names)) for i, s := range names { + if s == "trace" { + continue + } pids[i] = up.Parse(s) } - if up.err != nil { + if err := up.err; err != nil { return nil, err } sort.Slice(pids, func(i, j int) bool { diff --git a/vendor/github.com/lufia/plan9stats/disk.go b/vendor/github.com/lufia/plan9stats/disk.go new file mode 100644 index 0000000..4a4fa0c --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/disk.go @@ -0,0 +1,116 @@ +package stats + +import ( + "bufio" + "bytes" + "context" + "os" + "path/filepath" + "strings" +) + +// Storage represents /dev/sdXX/ctl. +type Storage struct { + Name string + Model string + Capacity int64 + Partitions []*Partition +} + +// Partition represents a part of /dev/sdXX/ctl. +type Partition struct { + Name string + Start uint64 + End uint64 +} + +func ReadStorages(ctx context.Context, opts ...Option) ([]*Storage, error) { + cfg := newConfig(opts...) 
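	// Each line of sdctl starts with a device name; below, that name plus a
	// trailing "*" is globbed under cfg.rootdir/dev to find the matching
	// sd device directories, and every match's ctl file is parsed.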
+ sdctl := filepath.Join(cfg.rootdir, "/dev/sdctl") + f, err := os.Open(sdctl) + if err != nil { + return nil, err + } + defer f.Close() + + var a []*Storage + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := bytes.Split(scanner.Bytes(), delim) + if len(fields) == 0 { + continue + } + exp := string(fields[0]) + "*" + if !strings.HasPrefix(exp, "sd") { + continue + } + dir := filepath.Join(cfg.rootdir, "/dev", exp) + m, err := filepath.Glob(dir) + if err != nil { + return nil, err + } + for _, dir := range m { + s, err := readStorage(dir) + if err != nil { + return nil, err + } + a = append(a, s) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return a, nil +} + +func readStorage(dir string) (*Storage, error) { + ctl := filepath.Join(dir, "ctl") + f, err := os.Open(ctl) + if err != nil { + return nil, err + } + defer f.Close() + + var s Storage + s.Name = filepath.Base(dir) + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Bytes() + switch { + case bytes.HasPrefix(line, []byte("inquiry ")): + s.Model = string(bytes.TrimSpace(line[7:])) + case bytes.HasPrefix(line, []byte("geometry ")): + fields := bytes.Split(line, delim) + if len(fields) < 3 { + continue + } + var p intParser + sec := p.ParseInt64(string(fields[1]), 10) + size := p.ParseInt64(string(fields[2]), 10) + if err := p.Err(); err != nil { + return nil, err + } + s.Capacity = sec * size + case bytes.HasPrefix(line, []byte("part ")): + fields := bytes.Split(line, delim) + if len(fields) < 4 { + continue + } + var p intParser + start := p.ParseUint64(string(fields[2]), 10) + end := p.ParseUint64(string(fields[3]), 10) + if err := p.Err(); err != nil { + return nil, err + } + s.Partitions = append(s.Partitions, &Partition{ + Name: string(fields[1]), + Start: start, + End: end, + }) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return &s, nil +} diff --git a/vendor/github.com/lufia/plan9stats/host.go b/vendor/github.com/lufia/plan9stats/host.go index 957e903..a3921c0 100644 --- a/vendor/github.com/lufia/plan9stats/host.go +++ b/vendor/github.com/lufia/plan9stats/host.go @@ -109,12 +109,6 @@ func parseGauge(s string, r *Gauge) error { return nil } -type Storage struct { - Name string - Model string - Capacity int64 -} - type Interface struct { Name string Addr string @@ -177,7 +171,7 @@ func ReadHost(ctx context.Context, opts ...Option) (*Host, error) { } h.Sysname = name - a, err := readStorages(cfg.rootdir) + a, err := ReadStorages(ctx, opts...) 
if err != nil { return nil, err } @@ -203,80 +197,6 @@ func readSysname(rootdir string) (string, error) { return string(bytes.TrimSpace(b)), nil } -func readStorages(rootdir string) ([]*Storage, error) { - sdctl := filepath.Join(rootdir, "/dev/sdctl") - f, err := os.Open(sdctl) - if err != nil { - return nil, err - } - defer f.Close() - - var a []*Storage - scanner := bufio.NewScanner(f) - for scanner.Scan() { - fields := bytes.Split(scanner.Bytes(), delim) - if len(fields) == 0 { - continue - } - exp := string(fields[0]) + "*" - if !strings.HasPrefix(exp, "sd") { - continue - } - dir := filepath.Join(rootdir, "/dev", exp) - m, err := filepath.Glob(dir) - if err != nil { - return nil, err - } - for _, dir := range m { - s, err := readStorage(dir) - if err != nil { - return nil, err - } - a = append(a, s) - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return a, nil -} - -func readStorage(dir string) (*Storage, error) { - ctl := filepath.Join(dir, "ctl") - f, err := os.Open(ctl) - if err != nil { - return nil, err - } - defer f.Close() - - var s Storage - s.Name = filepath.Base(dir) - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Bytes() - switch { - case bytes.HasPrefix(line, []byte("inquiry")): - s.Model = string(bytes.TrimSpace(line[7:])) - case bytes.HasPrefix(line, []byte("geometry")): - fields := bytes.Split(line, delim) - if len(fields) < 3 { - continue - } - var p intParser - sec := p.ParseInt64(string(fields[1]), 10) - size := p.ParseInt64(string(fields[2]), 10) - if err := p.Err(); err != nil { - return nil, err - } - s.Capacity = sec * size - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return &s, nil -} - type IPStats struct { ID int // number of interface in ipifc dir Device string // associated physical device diff --git a/vendor/github.com/lufia/plan9stats/int.go b/vendor/github.com/lufia/plan9stats/int.go index db133c4..e3c9dc8 100644 --- a/vendor/github.com/lufia/plan9stats/int.go +++ b/vendor/github.com/lufia/plan9stats/int.go @@ -26,6 +26,15 @@ func (p *intParser) ParseInt64(s string, base int) int64 { return n } +func (p *intParser) ParseUint64(s string, base int) uint64 { + if p.err != nil { + return 0 + } + var n uint64 + n, p.err = strconv.ParseUint(s, base, 64) + return n +} + func (p *intParser) Err() error { return p.err } diff --git a/vendor/github.com/montanaflynn/stats/.gitignore b/vendor/github.com/montanaflynn/stats/.gitignore new file mode 100644 index 0000000..e0a38e1 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/.gitignore @@ -0,0 +1,5 @@ +coverage.out +release-notes.txt +.directory +.chglog +.vscode \ No newline at end of file diff --git a/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/vendor/github.com/montanaflynn/stats/CHANGELOG.md new file mode 100644 index 0000000..76cc20b --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/CHANGELOG.md @@ -0,0 +1,521 @@ + +## [Unreleased] + + + +## [v0.7.0] - 2023-01-08 +### Add +- Add geometric distribution functions ([#75](https://github.com/montanaflynn/stats/issues/75)) +- Add GitHub action go workflow + +### Remove +- Remove travis CI config + +### Update +- Update changelog with v0.7.0 changes +- Update github action go workflow +- Update geometric distribution tests + + + +## [v0.6.6] - 2021-04-26 +### Add +- Add support for string and io.Reader in LoadRawData (pr [#68](https://github.com/montanaflynn/stats/issues/68)) +- Add latest versions of Go to test against + +### Update +- Update changelog with v0.6.6 changes 
+ +### Use +- Use math.Sqrt in StandardDeviation (PR [#64](https://github.com/montanaflynn/stats/issues/64)) + + + +## [v0.6.5] - 2021-02-21 +### Add +- Add Float64Data.Quartiles documentation +- Add Quartiles method to Float64Data type (issue [#60](https://github.com/montanaflynn/stats/issues/60)) + +### Fix +- Fix make release changelog command and add changelog history + +### Update +- Update changelog with v0.6.5 changes +- Update changelog with v0.6.4 changes +- Update README.md links to CHANGELOG.md and DOCUMENTATION.md +- Update README.md and Makefile with new release commands + + + +## [v0.6.4] - 2021-01-13 +### Fix +- Fix failing tests due to precision errors on arm64 ([#58](https://github.com/montanaflynn/stats/issues/58)) + +### Update +- Update changelog with v0.6.4 changes +- Update examples directory to include a README.md used for synopsis +- Update go.mod to include go version where modules are enabled by default +- Update changelog with v0.6.3 changes + + + +## [v0.6.3] - 2020-02-18 +### Add +- Add creating and committing changelog to Makefile release directive +- Add release-notes.txt and .chglog directory to .gitignore + +### Update +- Update exported tests to use import for better example documentation +- Update documentation using godoc2md +- Update changelog with v0.6.2 release + + + +## [v0.6.2] - 2020-02-18 +### Fix +- Fix linting errcheck warnings in go benchmarks + +### Update +- Update Makefile release directive to use correct release name + + + +## [v0.6.1] - 2020-02-18 +### Add +- Add StableSample function signature to readme + +### Fix +- Fix linting warnings for normal distribution functions formatting and tests + +### Update +- Update documentation links and rename DOC.md to DOCUMENTATION.md +- Update README with link to pkg.go.dev reference and release section +- Update Makefile with new changelog, docs, and release directives +- Update DOC.md links to GitHub source code +- Update doc.go comment and add DOC.md package reference file +- Update changelog using git-chglog + + + +## [v0.6.0] - 2020-02-17 +### Add +- Add Normal Distribution Functions ([#56](https://github.com/montanaflynn/stats/issues/56)) +- Add previous versions of Go to travis CI config +- Add check for distinct values in Mode function ([#51](https://github.com/montanaflynn/stats/issues/51)) +- Add StableSample function ([#48](https://github.com/montanaflynn/stats/issues/48)) +- Add doc.go file to show description and usage on godoc.org +- Add comments to new error and legacy error variables +- Add ExampleRound function to tests +- Add go.mod file for module support +- Add Sigmoid, SoftMax and Entropy methods and tests +- Add Entropy documentation, example and benchmarks +- Add Entropy function ([#44](https://github.com/montanaflynn/stats/issues/44)) + +### Fix +- Fix percentile when only one element ([#47](https://github.com/montanaflynn/stats/issues/47)) +- Fix AutoCorrelation name in comments and remove unneeded Sprintf + +### Improve +- Improve documentation section with command comments + +### Remove +- Remove very old versions of Go in travis CI config +- Remove boolean comparison to get rid of gometalinter warning + +### Update +- Update license dates +- Update Distance functions signatures to use Float64Data +- Update Sigmoid examples +- Update error names with backward compatibility + +### Use +- Use relative link to examples/main.go +- Use a single var block for exported errors + + + +## [v0.5.0] - 2019-01-16 +### Add +- Add Sigmoid and Softmax functions + +### Fix +- Fix syntax 
highlighting and add CumulativeSum func + + + +## [v0.4.0] - 2019-01-14 +### Add +- Add goreport badge and documentation section to README.md +- Add Examples to test files +- Add AutoCorrelation and nist tests +- Add String method to statsErr type +- Add Y coordinate error for ExponentialRegression +- Add syntax highlighting ([#43](https://github.com/montanaflynn/stats/issues/43)) +- Add CumulativeSum ([#40](https://github.com/montanaflynn/stats/issues/40)) +- Add more tests and rename distance files +- Add coverage and benchmarks to azure pipeline +- Add go tests to azure pipeline + +### Change +- Change travis tip alias to master +- Change codecov to coveralls for code coverage + +### Fix +- Fix a few lint warnings +- Fix example error + +### Improve +- Improve test coverage of distance functions + +### Only +- Only run travis on stable and tip versions +- Only check code coverage on tip + +### Remove +- Remove azure CI pipeline +- Remove unnecessary type conversions + +### Return +- Return EmptyInputErr instead of EmptyInput + +### Set +- Set up CI with Azure Pipelines + + + +## [0.3.0] - 2017-12-02 +### Add +- Add Chebyshev, Manhattan, Euclidean and Minkowski distance functions ([#35](https://github.com/montanaflynn/stats/issues/35)) +- Add function for computing chebyshev distance. ([#34](https://github.com/montanaflynn/stats/issues/34)) +- Add support for time.Duration +- Add LoadRawData to docs and examples +- Add unit test for edge case that wasn't covered +- Add unit tests for edge cases that weren't covered +- Add pearson alias delegating to correlation +- Add CovariancePopulation to Float64Data +- Add pearson product-moment correlation coefficient +- Add population covariance +- Add random slice benchmarks +- Add all applicable functions as methods to Float64Data type +- Add MIT license badge +- Add link to examples/methods.go +- Add Protips for usage and documentation sections +- Add tests for rounding up +- Add webdoc target and remove linting from test target +- Add example usage and consolidate contributing information + +### Added +- Added MedianAbsoluteDeviation + +### Annotation +- Annotation spelling error + +### Auto +- auto commit +- auto commit + +### Calculate +- Calculate correlation with sdev and covp + +### Clean +- Clean up README.md and add info for offline docs + +### Consolidated +- Consolidated all error values. + +### Fix +- Fix Percentile logic +- Fix InterQuartileRange method test +- Fix zero percent bug and add test +- Fix usage example output typos + +### Improve +- Improve bounds checking in Percentile +- Improve error log messaging + +### Imput +- Imput -> Input + +### Include +- Include alternative way to set Float64Data in example + +### Make +- Make various changes to README.md + +### Merge +- Merge branch 'master' of github.com:montanaflynn/stats +- Merge master + +### Mode +- Mode calculation fix and tests + +### Realized +- Realized the obvious efficiency gains of ignoring the unique numbers at the beginning of the slice. Benchmark joy ensued. + +### Refactor +- Refactor testing of Round() +- Refactor setting Coordinate y field using Exp in place of Pow +- Refactor Makefile and add docs target + +### Remove +- Remove deep links to types and functions + +### Rename +- Rename file from types to data + +### Retrieve +- Retrieve InterQuartileRange for the Float64Data. 
+ +### Split +- Split up stats.go into separate files + +### Support +- Support more types on LoadRawData() ([#36](https://github.com/montanaflynn/stats/issues/36)) + +### Switch +- Switch default and check targets + +### Update +- Update Readme +- Update example methods and some text +- Update README and include Float64Data type method examples + +### Pull Requests +- Merge pull request [#32](https://github.com/montanaflynn/stats/issues/32) from a-robinson/percentile +- Merge pull request [#30](https://github.com/montanaflynn/stats/issues/30) from montanaflynn/fix-test +- Merge pull request [#29](https://github.com/montanaflynn/stats/issues/29) from edupsousa/master +- Merge pull request [#27](https://github.com/montanaflynn/stats/issues/27) from andrey-yantsen/fix-percentile-out-of-bounds +- Merge pull request [#25](https://github.com/montanaflynn/stats/issues/25) from kazhuravlev/patch-1 +- Merge pull request [#22](https://github.com/montanaflynn/stats/issues/22) from JanBerktold/time-duration +- Merge pull request [#24](https://github.com/montanaflynn/stats/issues/24) from alouche/master +- Merge pull request [#21](https://github.com/montanaflynn/stats/issues/21) from brydavis/master +- Merge pull request [#19](https://github.com/montanaflynn/stats/issues/19) from ginodeis/mode-bug +- Merge pull request [#17](https://github.com/montanaflynn/stats/issues/17) from Kunde21/master +- Merge pull request [#3](https://github.com/montanaflynn/stats/issues/3) from montanaflynn/master +- Merge pull request [#2](https://github.com/montanaflynn/stats/issues/2) from montanaflynn/master +- Merge pull request [#13](https://github.com/montanaflynn/stats/issues/13) from toashd/pearson +- Merge pull request [#12](https://github.com/montanaflynn/stats/issues/12) from alixaxel/MAD +- Merge pull request [#1](https://github.com/montanaflynn/stats/issues/1) from montanaflynn/master +- Merge pull request [#11](https://github.com/montanaflynn/stats/issues/11) from Kunde21/modeMemReduce +- Merge pull request [#10](https://github.com/montanaflynn/stats/issues/10) from Kunde21/ModeRewrite + + + +## [0.2.0] - 2015-10-14 +### Add +- Add Makefile with gometalinter, testing, benchmarking and coverage report targets +- Add comments describing functions and structs +- Add Correlation func +- Add Covariance func +- Add tests for new function shortcuts +- Add StandardDeviation function as a shortcut to StandardDeviationPopulation +- Add Float64Data and Series types + +### Change +- Change Sample to return a standard []float64 type + +### Fix +- Fix broken link to Makefile +- Fix broken link and simplify code coverage reporting command +- Fix go vet warning about printf type placeholder +- Fix failing codecov test coverage reporting +- Fix link to CHANGELOG.md + +### Fixed +- Fixed typographical error, changed accomdate to accommodate in README. 
+ +### Include +- Include Variance and StandardDeviation shortcuts + +### Pass +- Pass gometalinter + +### Refactor +- Refactor Variance function to be the same as population variance + +### Release +- Release version 0.2.0 + +### Remove +- Remove unneeded do packages and update cover URL +- Remove sudo from pip install + +### Reorder +- Reorder functions and sections + +### Revert +- Revert to legacy containers to preserve go1.1 testing + +### Switch +- Switch from legacy to container-based CI infrastructure + +### Update +- Update contributing instructions and mention Makefile + +### Pull Requests +- Merge pull request [#5](https://github.com/montanaflynn/stats/issues/5) from orthographic-pedant/spell_check/accommodate + + + +## [0.1.0] - 2015-08-19 +### Add +- Add CONTRIBUTING.md + +### Rename +- Rename functions while preserving backwards compatibility + + + +## 0.0.9 - 2015-08-18 +### Add +- Add HarmonicMean func +- Add GeometricMean func +- Add .gitignore to avoid commiting test coverage report +- Add Outliers stuct and QuantileOutliers func +- Add Interquartile Range, Midhinge and Trimean examples +- Add Trimean +- Add Midhinge +- Add Inter Quartile Range +- Add a unit test to check for an empty slice error +- Add Quantiles struct and Quantile func +- Add more tests and fix a typo +- Add Golang 1.5 to build tests +- Add a standard MIT license file +- Add basic benchmarking +- Add regression models +- Add codecov token +- Add codecov +- Add check for slices with a single item +- Add coverage tests +- Add back previous Go versions to Travis CI +- Add Travis CI +- Add GoDoc badge +- Add Percentile and Float64ToInt functions +- Add another rounding test for whole numbers +- Add build status badge +- Add code coverage badge +- Add test for NaN, achieving 100% code coverage +- Add round function +- Add standard deviation function +- Add sum function + +### Add +- add tests for sample +- add sample + +### Added +- Added sample and population variance and deviation functions +- Added README + +### Adjust +- Adjust API ordering + +### Avoid +- Avoid unintended consequence of using sort + +### Better +- Better performing min/max +- Better description + +### Change +- Change package path to potentially fix a bug in earlier versions of Go + +### Clean +- Clean up README and add some more information +- Clean up test error + +### Consistent +- Consistent empty slice error messages +- Consistent var naming +- Consistent func declaration + +### Convert +- Convert ints to floats + +### Duplicate +- Duplicate packages for all versions + +### Export +- Export Coordinate struct fields + +### First +- First commit + +### Fix +- Fix copy pasta mistake testing the wrong function +- Fix error message +- Fix usage output and edit API doc section +- Fix testing edgecase where map was in wrong order +- Fix usage example +- Fix usage examples + +### Include +- Include the Nearest Rank method of calculating percentiles + +### More +- More commenting + +### Move +- Move GoDoc link to top + +### Redirect +- Redirect kills newer versions of Go + +### Refactor +- Refactor code and error checking + +### Remove +- Remove unnecassary typecasting in sum func +- Remove cover since it doesn't work for later versions of go +- Remove golint and gocoveralls + +### Rename +- Rename StandardDev to StdDev +- Rename StandardDev to StdDev + +### Return +- Return errors for all functions + +### Run +- Run go fmt to clean up formatting + +### Simplify +- Simplify min/max function + +### Start +- Start with minimal tests + +### 
Switch +- Switch wercker to travis and update todos + +### Table +- table testing style + +### Update +- Update README and move the example main.go into it's own file +- Update TODO list +- Update README +- Update usage examples and todos + +### Use +- Use codecov the recommended way +- Use correct string formatting types + +### Pull Requests +- Merge pull request [#4](https://github.com/montanaflynn/stats/issues/4) from saromanov/sample + + +[Unreleased]: https://github.com/montanaflynn/stats/compare/v0.7.0...HEAD +[v0.7.0]: https://github.com/montanaflynn/stats/compare/v0.6.6...v0.7.0 +[v0.6.6]: https://github.com/montanaflynn/stats/compare/v0.6.5...v0.6.6 +[v0.6.5]: https://github.com/montanaflynn/stats/compare/v0.6.4...v0.6.5 +[v0.6.4]: https://github.com/montanaflynn/stats/compare/v0.6.3...v0.6.4 +[v0.6.3]: https://github.com/montanaflynn/stats/compare/v0.6.2...v0.6.3 +[v0.6.2]: https://github.com/montanaflynn/stats/compare/v0.6.1...v0.6.2 +[v0.6.1]: https://github.com/montanaflynn/stats/compare/v0.6.0...v0.6.1 +[v0.6.0]: https://github.com/montanaflynn/stats/compare/v0.5.0...v0.6.0 +[v0.5.0]: https://github.com/montanaflynn/stats/compare/v0.4.0...v0.5.0 +[v0.4.0]: https://github.com/montanaflynn/stats/compare/0.3.0...v0.4.0 +[0.3.0]: https://github.com/montanaflynn/stats/compare/0.2.0...0.3.0 +[0.2.0]: https://github.com/montanaflynn/stats/compare/0.1.0...0.2.0 +[0.1.0]: https://github.com/montanaflynn/stats/compare/0.0.9...0.1.0 diff --git a/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md new file mode 100644 index 0000000..b567889 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md @@ -0,0 +1,1237 @@ + + +# stats +`import "github.com/montanaflynn/stats"` + +* [Overview](#pkg-overview) +* [Index](#pkg-index) +* [Examples](#pkg-examples) +* [Subdirectories](#pkg-subdirectories) + +## Overview +Package stats is a well tested and comprehensive +statistics library package with no dependencies. + +Example Usage: + + + // start with some source data to use + data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} + + // you could also use different types like this + // data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) + // data := stats.LoadRawData([]interface{}{1.1, "2", 3}) + // etc... 
+ + median, _ := stats.Median(data) + fmt.Println(median) // 3.65 + + roundedMedian, _ := stats.Round(median, 0) + fmt.Println(roundedMedian) // 4 + +MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) + + + + +## Index +* [Variables](#pkg-variables) +* [func AutoCorrelation(data Float64Data, lags int) (float64, error)](#AutoCorrelation) +* [func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ChebyshevDistance) +* [func Correlation(data1, data2 Float64Data) (float64, error)](#Correlation) +* [func Covariance(data1, data2 Float64Data) (float64, error)](#Covariance) +* [func CovariancePopulation(data1, data2 Float64Data) (float64, error)](#CovariancePopulation) +* [func CumulativeSum(input Float64Data) ([]float64, error)](#CumulativeSum) +* [func Entropy(input Float64Data) (float64, error)](#Entropy) +* [func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#EuclideanDistance) +* [func GeometricMean(input Float64Data) (float64, error)](#GeometricMean) +* [func HarmonicMean(input Float64Data) (float64, error)](#HarmonicMean) +* [func InterQuartileRange(input Float64Data) (float64, error)](#InterQuartileRange) +* [func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)](#ManhattanDistance) +* [func Max(input Float64Data) (max float64, err error)](#Max) +* [func Mean(input Float64Data) (float64, error)](#Mean) +* [func Median(input Float64Data) (median float64, err error)](#Median) +* [func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviation) +* [func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)](#MedianAbsoluteDeviationPopulation) +* [func Midhinge(input Float64Data) (float64, error)](#Midhinge) +* [func Min(input Float64Data) (min float64, err error)](#Min) +* [func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)](#MinkowskiDistance) +* [func Mode(input Float64Data) (mode []float64, err error)](#Mode) +* [func Ncr(n, r int) int](#Ncr) +* [func NormBoxMullerRvs(loc float64, scale float64, size int) []float64](#NormBoxMullerRvs) +* [func NormCdf(x float64, loc float64, scale float64) float64](#NormCdf) +* [func NormEntropy(loc float64, scale float64) float64](#NormEntropy) +* [func NormFit(data []float64) [2]float64](#NormFit) +* [func NormInterval(alpha float64, loc float64, scale float64) [2]float64](#NormInterval) +* [func NormIsf(p float64, loc float64, scale float64) (x float64)](#NormIsf) +* [func NormLogCdf(x float64, loc float64, scale float64) float64](#NormLogCdf) +* [func NormLogPdf(x float64, loc float64, scale float64) float64](#NormLogPdf) +* [func NormLogSf(x float64, loc float64, scale float64) float64](#NormLogSf) +* [func NormMean(loc float64, scale float64) float64](#NormMean) +* [func NormMedian(loc float64, scale float64) float64](#NormMedian) +* [func NormMoment(n int, loc float64, scale float64) float64](#NormMoment) +* [func NormPdf(x float64, loc float64, scale float64) float64](#NormPdf) +* [func NormPpf(p float64, loc float64, scale float64) (x float64)](#NormPpf) +* [func NormPpfRvs(loc float64, scale float64, size int) []float64](#NormPpfRvs) +* [func NormSf(x float64, loc float64, scale float64) float64](#NormSf) +* [func NormStats(loc float64, scale float64, moments string) []float64](#NormStats) +* [func NormStd(loc float64, scale float64) float64](#NormStd) +* [func NormVar(loc float64, scale float64) 
float64](#NormVar) +* [func Pearson(data1, data2 Float64Data) (float64, error)](#Pearson) +* [func Percentile(input Float64Data, percent float64) (percentile float64, err error)](#Percentile) +* [func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)](#PercentileNearestRank) +* [func PopulationVariance(input Float64Data) (pvar float64, err error)](#PopulationVariance) +* [func Round(input float64, places int) (rounded float64, err error)](#Round) +* [func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)](#Sample) +* [func SampleVariance(input Float64Data) (svar float64, err error)](#SampleVariance) +* [func Sigmoid(input Float64Data) ([]float64, error)](#Sigmoid) +* [func SoftMax(input Float64Data) ([]float64, error)](#SoftMax) +* [func StableSample(input Float64Data, takenum int) ([]float64, error)](#StableSample) +* [func StandardDeviation(input Float64Data) (sdev float64, err error)](#StandardDeviation) +* [func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)](#StandardDeviationPopulation) +* [func StandardDeviationSample(input Float64Data) (sdev float64, err error)](#StandardDeviationSample) +* [func StdDevP(input Float64Data) (sdev float64, err error)](#StdDevP) +* [func StdDevS(input Float64Data) (sdev float64, err error)](#StdDevS) +* [func Sum(input Float64Data) (sum float64, err error)](#Sum) +* [func Trimean(input Float64Data) (float64, error)](#Trimean) +* [func VarP(input Float64Data) (sdev float64, err error)](#VarP) +* [func VarS(input Float64Data) (sdev float64, err error)](#VarS) +* [func Variance(input Float64Data) (sdev float64, err error)](#Variance) +* [type Coordinate](#Coordinate) + * [func ExpReg(s []Coordinate) (regressions []Coordinate, err error)](#ExpReg) + * [func LinReg(s []Coordinate) (regressions []Coordinate, err error)](#LinReg) + * [func LogReg(s []Coordinate) (regressions []Coordinate, err error)](#LogReg) +* [type Float64Data](#Float64Data) + * [func LoadRawData(raw interface{}) (f Float64Data)](#LoadRawData) + * [func (f Float64Data) AutoCorrelation(lags int) (float64, error)](#Float64Data.AutoCorrelation) + * [func (f Float64Data) Correlation(d Float64Data) (float64, error)](#Float64Data.Correlation) + * [func (f Float64Data) Covariance(d Float64Data) (float64, error)](#Float64Data.Covariance) + * [func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)](#Float64Data.CovariancePopulation) + * [func (f Float64Data) CumulativeSum() ([]float64, error)](#Float64Data.CumulativeSum) + * [func (f Float64Data) Entropy() (float64, error)](#Float64Data.Entropy) + * [func (f Float64Data) GeometricMean() (float64, error)](#Float64Data.GeometricMean) + * [func (f Float64Data) Get(i int) float64](#Float64Data.Get) + * [func (f Float64Data) HarmonicMean() (float64, error)](#Float64Data.HarmonicMean) + * [func (f Float64Data) InterQuartileRange() (float64, error)](#Float64Data.InterQuartileRange) + * [func (f Float64Data) Len() int](#Float64Data.Len) + * [func (f Float64Data) Less(i, j int) bool](#Float64Data.Less) + * [func (f Float64Data) Max() (float64, error)](#Float64Data.Max) + * [func (f Float64Data) Mean() (float64, error)](#Float64Data.Mean) + * [func (f Float64Data) Median() (float64, error)](#Float64Data.Median) + * [func (f Float64Data) MedianAbsoluteDeviation() (float64, error)](#Float64Data.MedianAbsoluteDeviation) + * [func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)](#Float64Data.MedianAbsoluteDeviationPopulation) + * 
[func (f Float64Data) Midhinge(d Float64Data) (float64, error)](#Float64Data.Midhinge) + * [func (f Float64Data) Min() (float64, error)](#Float64Data.Min) + * [func (f Float64Data) Mode() ([]float64, error)](#Float64Data.Mode) + * [func (f Float64Data) Pearson(d Float64Data) (float64, error)](#Float64Data.Pearson) + * [func (f Float64Data) Percentile(p float64) (float64, error)](#Float64Data.Percentile) + * [func (f Float64Data) PercentileNearestRank(p float64) (float64, error)](#Float64Data.PercentileNearestRank) + * [func (f Float64Data) PopulationVariance() (float64, error)](#Float64Data.PopulationVariance) + * [func (f Float64Data) Quartile(d Float64Data) (Quartiles, error)](#Float64Data.Quartile) + * [func (f Float64Data) QuartileOutliers() (Outliers, error)](#Float64Data.QuartileOutliers) + * [func (f Float64Data) Quartiles() (Quartiles, error)](#Float64Data.Quartiles) + * [func (f Float64Data) Sample(n int, r bool) ([]float64, error)](#Float64Data.Sample) + * [func (f Float64Data) SampleVariance() (float64, error)](#Float64Data.SampleVariance) + * [func (f Float64Data) Sigmoid() ([]float64, error)](#Float64Data.Sigmoid) + * [func (f Float64Data) SoftMax() ([]float64, error)](#Float64Data.SoftMax) + * [func (f Float64Data) StandardDeviation() (float64, error)](#Float64Data.StandardDeviation) + * [func (f Float64Data) StandardDeviationPopulation() (float64, error)](#Float64Data.StandardDeviationPopulation) + * [func (f Float64Data) StandardDeviationSample() (float64, error)](#Float64Data.StandardDeviationSample) + * [func (f Float64Data) Sum() (float64, error)](#Float64Data.Sum) + * [func (f Float64Data) Swap(i, j int)](#Float64Data.Swap) + * [func (f Float64Data) Trimean(d Float64Data) (float64, error)](#Float64Data.Trimean) + * [func (f Float64Data) Variance() (float64, error)](#Float64Data.Variance) +* [type Outliers](#Outliers) + * [func QuartileOutliers(input Float64Data) (Outliers, error)](#QuartileOutliers) +* [type Quartiles](#Quartiles) + * [func Quartile(input Float64Data) (Quartiles, error)](#Quartile) +* [type Series](#Series) + * [func ExponentialRegression(s Series) (regressions Series, err error)](#ExponentialRegression) + * [func LinearRegression(s Series) (regressions Series, err error)](#LinearRegression) + * [func LogarithmicRegression(s Series) (regressions Series, err error)](#LogarithmicRegression) + +#### Examples +* [AutoCorrelation](#example_AutoCorrelation) +* [ChebyshevDistance](#example_ChebyshevDistance) +* [Correlation](#example_Correlation) +* [CumulativeSum](#example_CumulativeSum) +* [Entropy](#example_Entropy) +* [LinearRegression](#example_LinearRegression) +* [LoadRawData](#example_LoadRawData) +* [Max](#example_Max) +* [Median](#example_Median) +* [Min](#example_Min) +* [Round](#example_Round) +* [Sigmoid](#example_Sigmoid) +* [SoftMax](#example_SoftMax) +* [Sum](#example_Sum) + +#### Package files +[correlation.go](/src/github.com/montanaflynn/stats/correlation.go) [cumulative_sum.go](/src/github.com/montanaflynn/stats/cumulative_sum.go) [data.go](/src/github.com/montanaflynn/stats/data.go) [deviation.go](/src/github.com/montanaflynn/stats/deviation.go) [distances.go](/src/github.com/montanaflynn/stats/distances.go) [doc.go](/src/github.com/montanaflynn/stats/doc.go) [entropy.go](/src/github.com/montanaflynn/stats/entropy.go) [errors.go](/src/github.com/montanaflynn/stats/errors.go) [legacy.go](/src/github.com/montanaflynn/stats/legacy.go) [load.go](/src/github.com/montanaflynn/stats/load.go) [max.go](/src/github.com/montanaflynn/stats/max.go) 
[mean.go](/src/github.com/montanaflynn/stats/mean.go) [median.go](/src/github.com/montanaflynn/stats/median.go) [min.go](/src/github.com/montanaflynn/stats/min.go) [mode.go](/src/github.com/montanaflynn/stats/mode.go) [norm.go](/src/github.com/montanaflynn/stats/norm.go) [outlier.go](/src/github.com/montanaflynn/stats/outlier.go) [percentile.go](/src/github.com/montanaflynn/stats/percentile.go) [quartile.go](/src/github.com/montanaflynn/stats/quartile.go) [ranksum.go](/src/github.com/montanaflynn/stats/ranksum.go) [regression.go](/src/github.com/montanaflynn/stats/regression.go) [round.go](/src/github.com/montanaflynn/stats/round.go) [sample.go](/src/github.com/montanaflynn/stats/sample.go) [sigmoid.go](/src/github.com/montanaflynn/stats/sigmoid.go) [softmax.go](/src/github.com/montanaflynn/stats/softmax.go) [sum.go](/src/github.com/montanaflynn/stats/sum.go) [util.go](/src/github.com/montanaflynn/stats/util.go) [variance.go](/src/github.com/montanaflynn/stats/variance.go) + + + +## Variables +``` go +var ( + // ErrEmptyInput Input must not be empty + ErrEmptyInput = statsError{"Input must not be empty."} + // ErrNaN Not a number + ErrNaN = statsError{"Not a number."} + // ErrNegative Must not contain negative values + ErrNegative = statsError{"Must not contain negative values."} + // ErrZero Must not contain zero values + ErrZero = statsError{"Must not contain zero values."} + // ErrBounds Input is outside of range + ErrBounds = statsError{"Input is outside of range."} + // ErrSize Must be the same length + ErrSize = statsError{"Must be the same length."} + // ErrInfValue Value is infinite + ErrInfValue = statsError{"Value is infinite."} + // ErrYCoord Y Value must be greater than zero + ErrYCoord = statsError{"Y Value must be greater than zero."} +) +``` +These are the package-wide error values. +All error identification should use these values. +https://github.com/golang/go/wiki/Errors#naming + +``` go +var ( + EmptyInputErr = ErrEmptyInput + NaNErr = ErrNaN + NegativeErr = ErrNegative + ZeroErr = ErrZero + BoundsErr = ErrBounds + SizeErr = ErrSize + InfValue = ErrInfValue + YCoordErr = ErrYCoord + EmptyInput = ErrEmptyInput +) +``` +Legacy error names that didn't start with Err + + + +## func [AutoCorrelation](/correlation.go?s=853:918#L38) +``` go +func AutoCorrelation(data Float64Data, lags int) (float64, error) +``` +AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay + + + +## func [ChebyshevDistance](/distances.go?s=368:456#L20) +``` go +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) +``` +ChebyshevDistance computes the Chebyshev distance between two data sets + + + +## func [Correlation](/correlation.go?s=112:171#L8) +``` go +func Correlation(data1, data2 Float64Data) (float64, error) +``` +Correlation describes the degree of relationship between two sets of data + + + +## func [Covariance](/variance.go?s=1284:1342#L53) +``` go +func Covariance(data1, data2 Float64Data) (float64, error) +``` +Covariance is a measure of how much two sets of data change + + + +## func [CovariancePopulation](/variance.go?s=1864:1932#L81) +``` go +func CovariancePopulation(data1, data2 Float64Data) (float64, error) +``` +CovariancePopulation computes covariance for entire population between two variables. 
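+
+As a quick illustration (an editor's sketch, not part of the generated godoc output): because the package exposes comparable sentinel error values such as `ErrEmptyInput` and `ErrSize`, callers can identify failures by comparing the returned error directly, as `Correlation` does internally.
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/montanaflynn/stats"
+)
+
+func main() {
+	// Correlation requires two non-empty inputs of equal length;
+	// mismatched lengths yield ErrSize (aka the legacy SizeErr).
+	_, err := stats.Correlation(stats.Float64Data{1, 2, 3}, stats.Float64Data{1, 2})
+	if err == stats.ErrSize {
+		fmt.Println("inputs must be the same length")
+	}
+}
+```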
+
+
+
+## func [CumulativeSum](/cumulative_sum.go?s=81:137#L4)
+``` go
+func CumulativeSum(input Float64Data) ([]float64, error)
+```
+CumulativeSum calculates the cumulative sum of the input slice
+
+
+
+## func [Entropy](/entropy.go?s=77:125#L6)
+``` go
+func Entropy(input Float64Data) (float64, error)
+```
+Entropy provides calculation of the entropy
+
+
+
+## func [EuclideanDistance](/distances.go?s=836:924#L36)
+``` go
+func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)
+```
+EuclideanDistance computes the Euclidean distance between two data sets
+
+
+
+## func [GeometricMean](/mean.go?s=319:373#L18)
+``` go
+func GeometricMean(input Float64Data) (float64, error)
+```
+GeometricMean gets the geometric mean for a slice of numbers
+
+
+
+## func [HarmonicMean](/mean.go?s=717:770#L40)
+``` go
+func HarmonicMean(input Float64Data) (float64, error)
+```
+HarmonicMean gets the harmonic mean for a slice of numbers
+
+
+
+## func [InterQuartileRange](/quartile.go?s=821:880#L45)
+``` go
+func InterQuartileRange(input Float64Data) (float64, error)
+```
+InterQuartileRange finds the range between Q1 and Q3
+
+
+
+## func [ManhattanDistance](/distances.go?s=1277:1365#L50)
+``` go
+func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error)
+```
+ManhattanDistance computes the Manhattan distance between two data sets
+
+
+
+## func [Max](/max.go?s=78:130#L8)
+``` go
+func Max(input Float64Data) (max float64, err error)
+```
+Max finds the highest number in a slice
+
+
+
+## func [Mean](/mean.go?s=77:122#L6)
+``` go
+func Mean(input Float64Data) (float64, error)
+```
+Mean gets the average of a slice of numbers
+
+
+
+## func [Median](/median.go?s=85:143#L6)
+``` go
+func Median(input Float64Data) (median float64, err error)
+```
+Median gets the median number in a slice of numbers
+
+
+
+## func [MedianAbsoluteDeviation](/deviation.go?s=125:197#L6)
+``` go
+func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error)
+```
+MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median
+
+
+
+## func [MedianAbsoluteDeviationPopulation](/deviation.go?s=360:442#L11)
+``` go
+func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error)
+```
+MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
+
+
+
+## func [Midhinge](/quartile.go?s=1075:1124#L55)
+``` go
+func Midhinge(input Float64Data) (float64, error)
+```
+Midhinge finds the average of the first and third quartiles
+
+
+
+## func [Min](/min.go?s=78:130#L6)
+``` go
+func Min(input Float64Data) (min float64, err error)
+```
+Min finds the lowest number in a set of data
+
+
+
+## func [MinkowskiDistance](/distances.go?s=2152:2256#L75)
+``` go
+func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error)
+```
+MinkowskiDistance computes the Minkowski distance between two data sets
+
+Arguments:
+
+
+    dataPointX: First set of data points
+    dataPointY: Second set of data points. Length of both data
+                sets must be equal.
+    lambda:     aka p or city blocks; with lambda = 1 the
+                returned distance is the Manhattan distance,
+                with lambda = 2 it is the Euclidean distance,
+                and as lambda approaches infinity it tends to
+                the Chebyshev distance.
+ +Return: + + + Distance or error + + + +## func [Mode](/mode.go?s=85:141#L4) +``` go +func Mode(input Float64Data) (mode []float64, err error) +``` +Mode gets the mode [most frequent value(s)] of a slice of float64s + + + +## func [Ncr](/norm.go?s=7384:7406#L239) +``` go +func Ncr(n, r int) int +``` +Ncr is an N choose R algorithm. +Aaron Cannon's algorithm. + + + +## func [NormBoxMullerRvs](/norm.go?s=667:736#L23) +``` go +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 +``` +NormBoxMullerRvs generates random variates using the Box–Muller transform. +For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html + + + +## func [NormCdf](/norm.go?s=1826:1885#L52) +``` go +func NormCdf(x float64, loc float64, scale float64) float64 +``` +NormCdf is the cumulative distribution function. + + + +## func [NormEntropy](/norm.go?s=5773:5825#L180) +``` go +func NormEntropy(loc float64, scale float64) float64 +``` +NormEntropy is the differential entropy of the RV. + + + +## func [NormFit](/norm.go?s=6058:6097#L187) +``` go +func NormFit(data []float64) [2]float64 +``` +NormFit returns the maximum likelihood estimators for the Normal Distribution. +Takes array of float64 values. +Returns array of Mean followed by Standard Deviation. + + + +## func [NormInterval](/norm.go?s=6976:7047#L221) +``` go +func NormInterval(alpha float64, loc float64, scale float64) [2]float64 +``` +NormInterval finds endpoints of the range that contains alpha percent of the distribution. + + + +## func [NormIsf](/norm.go?s=4330:4393#L137) +``` go +func NormIsf(p float64, loc float64, scale float64) (x float64) +``` +NormIsf is the inverse survival function (inverse of sf). + + + +## func [NormLogCdf](/norm.go?s=2016:2078#L57) +``` go +func NormLogCdf(x float64, loc float64, scale float64) float64 +``` +NormLogCdf is the log of the cumulative distribution function. + + + +## func [NormLogPdf](/norm.go?s=1590:1652#L47) +``` go +func NormLogPdf(x float64, loc float64, scale float64) float64 +``` +NormLogPdf is the log of the probability density function. + + + +## func [NormLogSf](/norm.go?s=2423:2484#L67) +``` go +func NormLogSf(x float64, loc float64, scale float64) float64 +``` +NormLogSf is the log of the survival function. + + + +## func [NormMean](/norm.go?s=6560:6609#L206) +``` go +func NormMean(loc float64, scale float64) float64 +``` +NormMean is the mean/expected value of the distribution. + + + +## func [NormMedian](/norm.go?s=6431:6482#L201) +``` go +func NormMedian(loc float64, scale float64) float64 +``` +NormMedian is the median of the distribution. + + + +## func [NormMoment](/norm.go?s=4694:4752#L146) +``` go +func NormMoment(n int, loc float64, scale float64) float64 +``` +NormMoment approximates the non-central (raw) moment of order n. +For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution + + + +## func [NormPdf](/norm.go?s=1357:1416#L42) +``` go +func NormPdf(x float64, loc float64, scale float64) float64 +``` +NormPdf is the probability density function. + + + +## func [NormPpf](/norm.go?s=2854:2917#L75) +``` go +func NormPpf(p float64, loc float64, scale float64) (x float64) +``` +NormPpf is the point percentile function. +This is based on Peter John Acklam's inverse normal CDF. +algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible). 
+For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/
+
+
+
+## func [NormPpfRvs](/norm.go?s=247:310#L12)
+``` go
+func NormPpfRvs(loc float64, scale float64, size int) []float64
+```
+NormPpfRvs generates random variates using the Point Percentile Function.
+For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/
+
+
+
+## func [NormSf](/norm.go?s=2250:2308#L62)
+``` go
+func NormSf(x float64, loc float64, scale float64) float64
+```
+NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate).
+
+
+
+## func [NormStats](/norm.go?s=5277:5345#L162)
+``` go
+func NormStats(loc float64, scale float64, moments string) []float64
+```
+NormStats returns the mean, variance, skew, and/or kurtosis.
+Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’).
+Takes string containing any of 'mvsk'.
+Returns array of m v s k in that order.
+
+
+
+## func [NormStd](/norm.go?s=6814:6862#L216)
+``` go
+func NormStd(loc float64, scale float64) float64
+```
+NormStd is the standard deviation of the distribution.
+
+
+
+## func [NormVar](/norm.go?s=6675:6723#L211)
+``` go
+func NormVar(loc float64, scale float64) float64
+```
+NormVar is the variance of the distribution.
+
+
+
+## func [Pearson](/correlation.go?s=655:710#L33)
+``` go
+func Pearson(data1, data2 Float64Data) (float64, error)
+```
+Pearson calculates the Pearson product-moment correlation coefficient between two variables
+
+
+
+## func [Percentile](/percentile.go?s=98:181#L8)
+``` go
+func Percentile(input Float64Data, percent float64) (percentile float64, err error)
+```
+Percentile finds the relative standing in a slice of floats
+
+
+
+## func [PercentileNearestRank](/percentile.go?s=1079:1173#L54)
+``` go
+func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error)
+```
+PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method
+
+
+
+## func [PopulationVariance](/variance.go?s=828:896#L31)
+``` go
+func PopulationVariance(input Float64Data) (pvar float64, err error)
+```
+PopulationVariance finds the amount of variance within a population
+
+
+
+## func [Round](/round.go?s=88:154#L6)
+``` go
+func Round(input float64, places int) (rounded float64, err error)
+```
+Round a float to a specific decimal place or precision
+
+
+
+## func [Sample](/sample.go?s=112:192#L9)
+``` go
+func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error)
+```
+Sample returns sample from input with replacement or without
+
+
+
+## func [SampleVariance](/variance.go?s=1058:1122#L42)
+``` go
+func SampleVariance(input Float64Data) (svar float64, err error)
+```
+SampleVariance finds the amount of variance within a sample
+
+
+
+## func [Sigmoid](/sigmoid.go?s=228:278#L9)
+``` go
+func Sigmoid(input Float64Data) ([]float64, error)
+```
+Sigmoid returns the input values in the range of 0 to 1
+along the sigmoid or s-shaped curve, commonly used in
+machine learning while training neural networks as an
+activation function.
+
+
+
+## func [SoftMax](/softmax.go?s=206:256#L8)
+``` go
+func SoftMax(input Float64Data) ([]float64, error)
+```
+SoftMax returns the input values in the range of 0 to 1
+with sum of all the probabilities being equal to one. It
+is commonly used in machine learning neural networks.
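+
+The following usage sketch (an editor's illustration, not part of the generated docs) contrasts `Sigmoid` and `SoftMax` on the same input; the `SoftMax` results sum to one.
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/montanaflynn/stats"
+)
+
+func main() {
+	data := stats.Float64Data{1.0, 2.0, 3.0}
+
+	// Element-wise s-shaped squashing of each value.
+	s, _ := stats.Sigmoid(data)
+	fmt.Println(s)
+
+	// Normalized exponentials; the returned slice sums to 1.
+	m, _ := stats.SoftMax(data)
+	fmt.Println(m)
+}
+```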
+
+
+
+## func [StableSample](/sample.go?s=974:1042#L50)
+``` go
+func StableSample(input Float64Data, takenum int) ([]float64, error)
+```
+StableSample, like a stable sort, returns samples from the input while keeping the order of the original data.
+
+
+
+## func [StandardDeviation](/deviation.go?s=695:762#L27)
+``` go
+func StandardDeviation(input Float64Data) (sdev float64, err error)
+```
+StandardDeviation the amount of variation in the dataset
+
+
+
+## func [StandardDeviationPopulation](/deviation.go?s=892:969#L32)
+``` go
+func StandardDeviationPopulation(input Float64Data) (sdev float64, err error)
+```
+StandardDeviationPopulation finds the amount of variation from the population
+
+
+
+## func [StandardDeviationSample](/deviation.go?s=1254:1327#L46)
+``` go
+func StandardDeviationSample(input Float64Data) (sdev float64, err error)
+```
+StandardDeviationSample finds the amount of variation from a sample
+
+
+
+## func [StdDevP](/legacy.go?s=339:396#L14)
+``` go
+func StdDevP(input Float64Data) (sdev float64, err error)
+```
+StdDevP is a shortcut to StandardDeviationPopulation
+
+
+
+## func [StdDevS](/legacy.go?s=497:554#L19)
+``` go
+func StdDevS(input Float64Data) (sdev float64, err error)
+```
+StdDevS is a shortcut to StandardDeviationSample
+
+
+
+## func [Sum](/sum.go?s=78:130#L6)
+``` go
+func Sum(input Float64Data) (sum float64, err error)
+```
+Sum adds all the numbers of a slice together
+
+
+
+## func [Trimean](/quartile.go?s=1320:1368#L65)
+``` go
+func Trimean(input Float64Data) (float64, error)
+```
+Trimean finds the average of the median and the midhinge
+
+
+
+## func [VarP](/legacy.go?s=59:113#L4)
+``` go
+func VarP(input Float64Data) (sdev float64, err error)
+```
+VarP is a shortcut to PopulationVariance
+
+
+
+## func [VarS](/legacy.go?s=193:247#L9)
+``` go
+func VarS(input Float64Data) (sdev float64, err error)
+```
+VarS is a shortcut to SampleVariance
+
+
+
+## func [Variance](/variance.go?s=659:717#L26)
+``` go
+func Variance(input Float64Data) (sdev float64, err error)
+```
+Variance the amount of variation in the dataset
+
+
+
+
+## type [Coordinate](/regression.go?s=143:183#L9)
+``` go
+type Coordinate struct {
+    X, Y float64
+}
+
+```
+Coordinate holds the data in a series
+
+
+
+
+
+
+
+### func [ExpReg](/legacy.go?s=791:856#L29)
+``` go
+func ExpReg(s []Coordinate) (regressions []Coordinate, err error)
+```
+ExpReg is a shortcut to ExponentialRegression
+
+
+### func [LinReg](/legacy.go?s=643:708#L24)
+``` go
+func LinReg(s []Coordinate) (regressions []Coordinate, err error)
+```
+LinReg is a shortcut to LinearRegression
+
+
+### func [LogReg](/legacy.go?s=944:1009#L34)
+``` go
+func LogReg(s []Coordinate) (regressions []Coordinate, err error)
+```
+LogReg is a shortcut to LogarithmicRegression
+
+
+
+
+
+## type [Float64Data](/data.go?s=80:106#L4)
+``` go
+type Float64Data []float64
+```
+Float64Data is a named type for []float64 with helper methods
+
+
+
+
+
+
+
+### func [LoadRawData](/load.go?s=119:168#L9)
+``` go
+func LoadRawData(raw interface{}) (f Float64Data)
+```
+LoadRawData parses and converts a slice of mixed data types to floats
+
+
+
+
+
+### func (Float64Data) [AutoCorrelation](/data.go?s=3257:3320#L91)
+``` go
+func (f Float64Data) AutoCorrelation(lags int) (float64, error)
+```
+AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
+
+
+
+
+### func (Float64Data) [Correlation](/data.go?s=3058:3122#L86)
+``` go
+func (f Float64Data) Correlation(d Float64Data) (float64, error)
+```
+Correlation describes the degree of relationship between two sets of data
+
+
+
+
+### func (Float64Data) [Covariance](/data.go?s=4801:4864#L141)
+``` go
+func (f Float64Data) Covariance(d Float64Data) (float64, error)
+```
+Covariance is a measure of how much two sets of data change
+
+
+
+
+### func (Float64Data) [CovariancePopulation](/data.go?s=4983:5056#L146)
+``` go
+func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error)
+```
+CovariancePopulation computes covariance for entire population between two variables
+
+
+
+
+### func (Float64Data) [CumulativeSum](/data.go?s=883:938#L28)
+``` go
+func (f Float64Data) CumulativeSum() ([]float64, error)
+```
+CumulativeSum returns the cumulative sum of the data
+
+
+
+
+### func (Float64Data) [Entropy](/data.go?s=5480:5527#L162)
+``` go
+func (f Float64Data) Entropy() (float64, error)
+```
+Entropy provides calculation of the entropy
+
+
+
+
+### func (Float64Data) [GeometricMean](/data.go?s=1332:1385#L40)
+``` go
+func (f Float64Data) GeometricMean() (float64, error)
+```
+GeometricMean returns the geometric mean of the data
+
+
+
+
+### func (Float64Data) [Get](/data.go?s=129:168#L7)
+``` go
+func (f Float64Data) Get(i int) float64
+```
+Get item in slice
+
+
+
+
+### func (Float64Data) [HarmonicMean](/data.go?s=1460:1512#L43)
+``` go
+func (f Float64Data) HarmonicMean() (float64, error)
+```
+HarmonicMean returns the harmonic mean of the data
+
+
+
+
+### func (Float64Data) [InterQuartileRange](/data.go?s=3755:3813#L106)
+``` go
+func (f Float64Data) InterQuartileRange() (float64, error)
+```
+InterQuartileRange finds the range between Q1 and Q3
+
+
+
+
+### func (Float64Data) [Len](/data.go?s=217:247#L10)
+``` go
+func (f Float64Data) Len() int
+```
+Len returns length of slice
+
+
+
+
+### func (Float64Data) [Less](/data.go?s=318:358#L13)
+``` go
+func (f Float64Data) Less(i, j int) bool
+```
+Less returns if one number is less than another
+
+
+
+
+### func (Float64Data) [Max](/data.go?s=645:688#L22)
+``` go
+func (f Float64Data) Max() (float64, error)
+```
+Max returns the maximum number in the data
+
+
+
+
+### func (Float64Data) [Mean](/data.go?s=1005:1049#L31)
+``` go
+func (f Float64Data) Mean() (float64, error)
+```
+Mean returns the mean of the data
+
+
+
+
+### func (Float64Data) [Median](/data.go?s=1111:1157#L34)
+``` go
+func (f Float64Data) Median() (float64, error)
+```
+Median returns the median of the data
+
+
+
+
+### func (Float64Data) [MedianAbsoluteDeviation](/data.go?s=1630:1693#L46)
+``` go
+func (f Float64Data) MedianAbsoluteDeviation() (float64, error)
+```
+MedianAbsoluteDeviation the median of the absolute deviations from the dataset median
+
+
+
+
+### func (Float64Data) [MedianAbsoluteDeviationPopulation](/data.go?s=1842:1915#L51)
+``` go
+func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error)
+```
+MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
+
+
+
+
+### func (Float64Data) [Midhinge](/data.go?s=3912:3973#L111)
+``` go
+func (f Float64Data) Midhinge(d Float64Data) (float64, error)
+```
+Midhinge finds the average of the first and third quartiles
+
+
+
+
+### func (Float64Data) [Min](/data.go?s=536:579#L19)
+``` go
+func (f Float64Data) Min() (float64, error)
+```
+Min returns the minimum number in the data
+
+
+
+
+### func (Float64Data) [Mode](/data.go?s=1217:1263#L37)
+``` go
+func (f Float64Data) Mode() ([]float64, error)
+```
+Mode returns the mode of the data
+
+
+
+
+### func (Float64Data) [Pearson](/data.go?s=3455:3515#L96)
+``` go
+func (f Float64Data) Pearson(d Float64Data) (float64, error) +``` +Pearson calculates the Pearson product-moment correlation coefficient between two variables. + + + + +### func (Float64Data) [Percentile](/data.go?s=2696:2755#L76) +``` go +func (f Float64Data) Percentile(p float64) (float64, error) +``` +Percentile finds the relative standing in a slice of floats + + + + +### func (Float64Data) [PercentileNearestRank](/data.go?s=2869:2939#L81) +``` go +func (f Float64Data) PercentileNearestRank(p float64) (float64, error) +``` +PercentileNearestRank finds the relative standing using the Nearest Rank method + + + + +### func (Float64Data) [PopulationVariance](/data.go?s=4495:4553#L131) +``` go +func (f Float64Data) PopulationVariance() (float64, error) +``` +PopulationVariance finds the amount of variance within a population + + + + +### func (Float64Data) [Quartile](/data.go?s=3610:3673#L101) +``` go +func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) +``` +Quartile returns the three quartile points from a slice of data + + + + +### func (Float64Data) [QuartileOutliers](/data.go?s=2542:2599#L71) +``` go +func (f Float64Data) QuartileOutliers() (Outliers, error) +``` +QuartileOutliers finds the mild and extreme outliers + + + + +### func (Float64Data) [Quartiles](/data.go?s=5628:5679#L167) +``` go +func (f Float64Data) Quartiles() (Quartiles, error) +``` +Quartiles returns the three quartile points from instance of Float64Data + + + + +### func (Float64Data) [Sample](/data.go?s=4208:4269#L121) +``` go +func (f Float64Data) Sample(n int, r bool) ([]float64, error) +``` +Sample returns sample from input with replacement or without + + + + +### func (Float64Data) [SampleVariance](/data.go?s=4652:4706#L136) +``` go +func (f Float64Data) SampleVariance() (float64, error) +``` +SampleVariance finds the amount of variance within a sample + + + + +### func (Float64Data) [Sigmoid](/data.go?s=5169:5218#L151) +``` go +func (f Float64Data) Sigmoid() ([]float64, error) +``` +Sigmoid returns the input values along the sigmoid or s-shaped curve + + + + +### func (Float64Data) [SoftMax](/data.go?s=5359:5408#L157) +``` go +func (f Float64Data) SoftMax() ([]float64, error) +``` +SoftMax returns the input values in the range of 0 to 1 +with sum of all the probabilities being equal to one. 
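+
+Since most of the package-level functions are mirrored as `Float64Data` methods, a value built with `LoadRawData` can be queried directly. A minimal sketch (an editor's illustration; the input values are arbitrary):
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/montanaflynn/stats"
+)
+
+func main() {
+	// LoadRawData converts mixed raw values to Float64Data;
+	// the string "2.5" is parsed as a float.
+	d := stats.LoadRawData([]interface{}{1, "2.5", 4.0})
+
+	mean, _ := d.Mean()
+	p90, _ := d.Percentile(90)
+	fmt.Println(mean, p90)
+}
+```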
+
+
+
+
+### func (Float64Data) [StandardDeviation](/data.go?s=2026:2083#L56)
+``` go
+func (f Float64Data) StandardDeviation() (float64, error)
+```
+StandardDeviation the amount of variation in the dataset
+
+
+
+
+### func (Float64Data) [StandardDeviationPopulation](/data.go?s=2199:2266#L61)
+``` go
+func (f Float64Data) StandardDeviationPopulation() (float64, error)
+```
+StandardDeviationPopulation finds the amount of variation from the population
+
+
+
+
+### func (Float64Data) [StandardDeviationSample](/data.go?s=2382:2445#L66)
+``` go
+func (f Float64Data) StandardDeviationSample() (float64, error)
+```
+StandardDeviationSample finds the amount of variation from a sample
+
+
+
+
+### func (Float64Data) [Sum](/data.go?s=764:807#L25)
+``` go
+func (f Float64Data) Sum() (float64, error)
+```
+Sum returns the total of all the numbers in the data
+
+
+
+
+### func (Float64Data) [Swap](/data.go?s=425:460#L16)
+``` go
+func (f Float64Data) Swap(i, j int)
+```
+Swap switches out two numbers in slice
+
+
+
+
+### func (Float64Data) [Trimean](/data.go?s=4059:4119#L116)
+``` go
+func (f Float64Data) Trimean(d Float64Data) (float64, error)
+```
+Trimean finds the average of the median and the midhinge
+
+
+
+
+### func (Float64Data) [Variance](/data.go?s=4350:4398#L126)
+``` go
+func (f Float64Data) Variance() (float64, error)
+```
+Variance the amount of variation in the dataset
+
+
+
+
+## type [Outliers](/outlier.go?s=73:139#L4)
+``` go
+type Outliers struct {
+    Mild    Float64Data
+    Extreme Float64Data
+}
+
+```
+Outliers holds mild and extreme outliers found in data
+
+
+
+
+
+
+
+### func [QuartileOutliers](/outlier.go?s=197:255#L10)
+``` go
+func QuartileOutliers(input Float64Data) (Outliers, error)
+```
+QuartileOutliers finds the mild and extreme outliers
+
+
+
+
+
+## type [Quartiles](/quartile.go?s=75:136#L6)
+``` go
+type Quartiles struct {
+    Q1 float64
+    Q2 float64
+    Q3 float64
+}
+
+```
+Quartiles holds the three quartile points
+
+
+
+
+
+
+
+### func [Quartile](/quartile.go?s=205:256#L13)
+``` go
+func Quartile(input Float64Data) (Quartiles, error)
+```
+Quartile returns the three quartile points from a slice of data
+
+
+
+
+
+## type [Series](/regression.go?s=76:100#L6)
+``` go
+type Series []Coordinate
+```
+Series is a container for a series of data
+
+
+
+
+
+
+
+### func [ExponentialRegression](/regression.go?s=1089:1157#L50)
+``` go
+func ExponentialRegression(s Series) (regressions Series, err error)
+```
+ExponentialRegression returns an exponential regression on data series
+
+
+### func [LinearRegression](/regression.go?s=262:325#L14)
+``` go
+func LinearRegression(s Series) (regressions Series, err error)
+```
+LinearRegression finds the least squares linear regression on data series
+
+
+### func [LogarithmicRegression](/regression.go?s=1903:1971#L85)
+``` go
+func LogarithmicRegression(s Series) (regressions Series, err error)
+```
+LogarithmicRegression returns a logarithmic regression on data series
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/montanaflynn/stats/LICENSE
similarity index 94%
rename from vendor/github.com/go-stack/stack/LICENSE.md
rename to vendor/github.com/montanaflynn/stats/LICENSE
index 2abf98e..1590961 100644
--- a/vendor/github.com/go-stack/stack/LICENSE.md
+++ b/vendor/github.com/montanaflynn/stats/LICENSE
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2014 Chris Hines
+Copyright (c) 2014-2020 Montana Flynn
(https://montanaflynn.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/montanaflynn/stats/Makefile b/vendor/github.com/montanaflynn/stats/Makefile new file mode 100644 index 0000000..969df12 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/Makefile @@ -0,0 +1,34 @@ +.PHONY: all + +default: test lint + +format: + go fmt . + +test: + go test -race + +check: format test + +benchmark: + go test -bench=. -benchmem + +coverage: + go test -coverprofile=coverage.out + go tool cover -html="coverage.out" + +lint: format + golangci-lint run . + +docs: + godoc2md github.com/montanaflynn/stats | sed -e s#src/target/##g > DOCUMENTATION.md + +release: + git-chglog --output CHANGELOG.md --next-tag ${TAG} + git add CHANGELOG.md + git commit -m "Update changelog with ${TAG} changes" + git tag ${TAG} + git-chglog $(TAG) | tail -n +4 | gsed '1s/^/$(TAG)\n/gm' > release-notes.txt + git push origin master ${TAG} + hub release create --copy -F release-notes.txt ${TAG} + diff --git a/vendor/github.com/montanaflynn/stats/README.md b/vendor/github.com/montanaflynn/stats/README.md new file mode 100644 index 0000000..d9ca440 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/README.md @@ -0,0 +1,231 @@ +# Stats - Golang Statistics Package + +[![][travis-svg]][travis-url] [![][coveralls-svg]][coveralls-url] [![][goreport-svg]][goreport-url] [![][godoc-svg]][godoc-url] [![][pkggodev-svg]][pkggodev-url] [![][license-svg]][license-url] + +A well tested and comprehensive Golang statistics library / package / module with no dependencies. + +If you have any suggestions, problems or bug reports please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition simply starring the repo would show your support for the project and be very much appreciated! + +## Installation + +``` +go get github.com/montanaflynn/stats +``` + +## Example Usage + +All the functions can be seen in [examples/main.go](examples/main.go) but here's a little taste: + +```go +// start with some source data to use +data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8} + +// you could also use different types like this +// data := stats.LoadRawData([]int{1, 2, 3, 4, 5}) +// data := stats.LoadRawData([]interface{}{1.1, "2", 3}) +// etc... + +median, _ := stats.Median(data) +fmt.Println(median) // 3.65 + +roundedMedian, _ := stats.Round(median, 0) +fmt.Println(roundedMedian) // 4 +``` + +## Documentation + +The entire API documentation is available on [GoDoc.org](http://godoc.org/github.com/montanaflynn/stats) or [pkg.go.dev](https://pkg.go.dev/github.com/montanaflynn/stats). + +You can also view docs offline with the following commands: + +``` +# Command line +godoc . # show all exported apis +godoc . Median # show a single function +godoc -ex . Round # show function with example +godoc . 
Float64Data # show the type and methods + +# Local website +godoc -http=:4444 # start the godoc server on port 4444 +open http://localhost:4444/pkg/github.com/montanaflynn/stats/ +``` + +The exported API is as follows: + +```go +var ( + ErrEmptyInput = statsError{"Input must not be empty."} + ErrNaN = statsError{"Not a number."} + ErrNegative = statsError{"Must not contain negative values."} + ErrZero = statsError{"Must not contain zero values."} + ErrBounds = statsError{"Input is outside of range."} + ErrSize = statsError{"Must be the same length."} + ErrInfValue = statsError{"Value is infinite."} + ErrYCoord = statsError{"Y Value must be greater than zero."} +) + +func Round(input float64, places int) (rounded float64, err error) {} + +type Float64Data []float64 + +func LoadRawData(raw interface{}) (f Float64Data) {} + +func AutoCorrelation(data Float64Data, lags int) (float64, error) {} +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func Correlation(data1, data2 Float64Data) (float64, error) {} +func Covariance(data1, data2 Float64Data) (float64, error) {} +func CovariancePopulation(data1, data2 Float64Data) (float64, error) {} +func CumulativeSum(input Float64Data) ([]float64, error) {} +func Entropy(input Float64Data) (float64, error) {} +func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func GeometricMean(input Float64Data) (float64, error) {} +func HarmonicMean(input Float64Data) (float64, error) {} +func InterQuartileRange(input Float64Data) (float64, error) {} +func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} +func Max(input Float64Data) (max float64, err error) {} +func Mean(input Float64Data) (float64, error) {} +func Median(input Float64Data) (median float64, err error) {} +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) {} +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) {} +func Midhinge(input Float64Data) (float64, error) {} +func Min(input Float64Data) (min float64, err error) {} +func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {} +func Mode(input Float64Data) (mode []float64, err error) {} +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {} +func NormCdf(x float64, loc float64, scale float64) float64 {} +func NormEntropy(loc float64, scale float64) float64 {} +func NormFit(data []float64) [2]float64{} +func NormInterval(alpha float64, loc float64, scale float64 ) [2]float64 {} +func NormIsf(p float64, loc float64, scale float64) (x float64) {} +func NormLogCdf(x float64, loc float64, scale float64) float64 {} +func NormLogPdf(x float64, loc float64, scale float64) float64 {} +func NormLogSf(x float64, loc float64, scale float64) float64 {} +func NormMean(loc float64, scale float64) float64 {} +func NormMedian(loc float64, scale float64) float64 {} +func NormMoment(n int, loc float64, scale float64) float64 {} +func NormPdf(x float64, loc float64, scale float64) float64 {} +func NormPpf(p float64, loc float64, scale float64) (x float64) {} +func NormPpfRvs(loc float64, scale float64, size int) []float64 {} +func NormSf(x float64, loc float64, scale float64) float64 {} +func NormStats(loc float64, scale float64, moments string) []float64 {} +func NormStd(loc float64, scale float64) float64 {} +func NormVar(loc float64, scale float64) float64 {} +func Pearson(data1, data2 Float64Data) (float64, error) {} 
+func Percentile(input Float64Data, percent float64) (percentile float64, err error) {}
+func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {}
+func PopulationVariance(input Float64Data) (pvar float64, err error) {}
+func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {}
+func SampleVariance(input Float64Data) (svar float64, err error) {}
+func Sigmoid(input Float64Data) ([]float64, error) {}
+func SoftMax(input Float64Data) ([]float64, error) {}
+func StableSample(input Float64Data, takenum int) ([]float64, error) {}
+func StandardDeviation(input Float64Data) (sdev float64, err error) {}
+func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) {}
+func StandardDeviationSample(input Float64Data) (sdev float64, err error) {}
+func StdDevP(input Float64Data) (sdev float64, err error) {}
+func StdDevS(input Float64Data) (sdev float64, err error) {}
+func Sum(input Float64Data) (sum float64, err error) {}
+func Trimean(input Float64Data) (float64, error) {}
+func VarP(input Float64Data) (sdev float64, err error) {}
+func VarS(input Float64Data) (sdev float64, err error) {}
+func Variance(input Float64Data) (sdev float64, err error) {}
+func ProbGeom(a int, b int, p float64) (prob float64, err error) {}
+func ExpGeom(p float64) (exp float64, err error) {}
+func VarGeom(p float64) (exp float64, err error) {}
+
+type Coordinate struct {
+    X, Y float64
+}
+
+type Series []Coordinate
+
+func ExponentialRegression(s Series) (regressions Series, err error) {}
+func LinearRegression(s Series) (regressions Series, err error) {}
+func LogarithmicRegression(s Series) (regressions Series, err error) {}
+
+type Outliers struct {
+    Mild    Float64Data
+    Extreme Float64Data
+}
+
+type Quartiles struct {
+    Q1 float64
+    Q2 float64
+    Q3 float64
+}
+
+func Quartile(input Float64Data) (Quartiles, error) {}
+func QuartileOutliers(input Float64Data) (Outliers, error) {}
+```
+
+## Contributing
+
+Pull requests are always welcome no matter how big or small. I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more.
+
+1. Fork the repo and clone your fork
+2. Create new branch (`git checkout -b some-thing`)
+3. Make the desired changes
+4. Ensure tests pass (`go test -cover` or `make test`)
+5. Run lint and fix problems (`go vet .` or `make lint`)
+6. Commit changes (`git commit -am 'Did something'`)
+7. Push branch (`git push origin some-thing`)
+8. Submit pull request
+
+To make things as seamless as possible please also consider the following steps:
+
+- Update `examples/main.go` with a simple example of the new feature
+- Update `README.md` documentation section with any new exported API
+- Keep 100% code coverage (you can check with `make coverage`)
+- Squash commits into single units of work with `git rebase -i new-feature`
+
+## Releasing
+
+To release a new version we should update the [CHANGELOG.md](/CHANGELOG.md) and [DOCUMENTATION.md](/DOCUMENTATION.md).
+
+
+First install the tools used to generate the markdown files:
+
+```
+go get github.com/davecheney/godoc2md
+go get github.com/golangci/golangci-lint/cmd/golangci-lint
+```
+
+Then you can run these `make` directives:
+
+```
+# Generate DOCUMENTATION.md
+make docs
+```
+
+Then we can create a [CHANGELOG.md](/CHANGELOG.md), a new git tag and a GitHub release:
+
+```
+make release TAG=v0.x.x
+```
+
+## MIT License
+
+Copyright (c) 2014-2021 Montana Flynn (https://montanaflynn.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+[travis-url]: https://travis-ci.org/montanaflynn/stats
+[travis-svg]: https://img.shields.io/travis/montanaflynn/stats.svg
+
+[coveralls-url]: https://coveralls.io/r/montanaflynn/stats?branch=master
+[coveralls-svg]: https://img.shields.io/coveralls/montanaflynn/stats.svg
+
+[goreport-url]: https://goreportcard.com/report/github.com/montanaflynn/stats
+[goreport-svg]: https://goreportcard.com/badge/github.com/montanaflynn/stats
+
+[godoc-url]: https://godoc.org/github.com/montanaflynn/stats
+[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg
+
+[pkggodev-url]: https://pkg.go.dev/github.com/montanaflynn/stats
+[pkggodev-svg]: https://gistcdn.githack.com/montanaflynn/b02f1d78d8c0de8435895d7e7cd0d473/raw/17f2a5a69f1323ecd42c00e0683655da96d9ecc8/badge.svg
+
+[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE
+[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg
diff --git a/vendor/github.com/montanaflynn/stats/correlation.go b/vendor/github.com/montanaflynn/stats/correlation.go
new file mode 100644
index 0000000..4acab94
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/correlation.go
@@ -0,0 +1,60 @@
+package stats
+
+import (
+	"math"
+)
+
+// Correlation describes the degree of relationship between two sets of data
+func Correlation(data1, data2 Float64Data) (float64, error) {
+
+	l1 := data1.Len()
+	l2 := data2.Len()
+
+	if l1 == 0 || l2 == 0 {
+		return math.NaN(), EmptyInputErr
+	}
+
+	if l1 != l2 {
+		return math.NaN(), SizeErr
+	}
+
+	sdev1, _ := StandardDeviationPopulation(data1)
+	sdev2, _ := StandardDeviationPopulation(data2)
+
+	if sdev1 == 0 || sdev2 == 0 {
+		return 0, nil
+	}
+
+	covp, _ := CovariancePopulation(data1, data2)
+	return covp / (sdev1 * sdev2), nil
+}
+
+// Pearson calculates the Pearson product-moment correlation coefficient between two variables
+func Pearson(data1, data2 Float64Data) (float64, error) {
+	return Correlation(data1, data2)
+}
+
+// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay
+func AutoCorrelation(data Float64Data, lags int) (float64, error) {
+	if len(data) < 1 {
+		return 0, EmptyInputErr
+	}
+
+	mean, _ := Mean(data)
+
+	var result, q float64
+
+	for i := 0; i < lags; i++ {
+		v := (data[0] - mean) * (data[0] - mean)
+		for i := 1; i < len(data); i++ {
+			delta0 := data[i-1] - mean
+			delta1 := data[i] - mean
+			q += (delta0*delta1 - q) / float64(i+1)
+			v += (delta1*delta1 - v) / float64(i+1)
+		}
+
+		result = q / v
+	}
+
+	return result, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/cumulative_sum.go b/vendor/github.com/montanaflynn/stats/cumulative_sum.go
new file mode 100644
index 0000000..e5305da
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/cumulative_sum.go
@@ -0,0 +1,21 @@
+package stats
+
+// CumulativeSum calculates the cumulative sum of the input slice
+func CumulativeSum(input Float64Data) ([]float64, error) {
+
+	if input.Len() == 0 {
+		return Float64Data{}, EmptyInput
+	}
+
+	cumSum := make([]float64, input.Len())
+
+	for i, val := range input {
+		if i == 0 {
+			cumSum[i] = val
+		} else {
+			cumSum[i] = cumSum[i-1] + val
+		}
+	}
+
+	return cumSum, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/data.go b/vendor/github.com/montanaflynn/stats/data.go
new file mode 100644
index 0000000..b86f0d8
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/data.go
@@ -0,0 +1,169 @@
+package stats
+
+// Float64Data is a named type for []float64 with helper methods
+type Float64Data []float64
+
+// Get item in slice
+func (f Float64Data) Get(i int) float64 { return f[i] }
+
+// Len returns length of slice
+func (f Float64Data) Len() int { return len(f) }
+
+// Less returns if one number is less than another
+func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] }
+
+// Swap switches out two numbers in slice
+func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+
+// Min returns the minimum number in the data
+func (f Float64Data) Min() (float64, error) { return Min(f) }
+
+// Max returns the maximum number in the data
+func (f Float64Data) Max() (float64, error) { return Max(f) }
+
+// Sum returns the total of all the numbers in the data
+func (f Float64Data) Sum() (float64, error) { return Sum(f) }
+
+// CumulativeSum returns the cumulative sum of the data
+func (f Float64Data) CumulativeSum() ([]float64, error) { return CumulativeSum(f) }
+
+// Mean returns the mean of the data
+func (f Float64Data) Mean() (float64, error) { return Mean(f) }
+
+// Median returns the median of the data
+func (f Float64Data) Median() (float64, error) { return Median(f) }
+
+// Mode returns the mode of the data
+func (f Float64Data) Mode() ([]float64, error) { return Mode(f) }
+
+// GeometricMean returns the geometric mean of the data
+func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) }
+
+// HarmonicMean returns the harmonic mean of the data
+func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) }
+
+// MedianAbsoluteDeviation the median of the absolute deviations from the dataset median
+func (f Float64Data) MedianAbsoluteDeviation() (float64, error) {
+	return MedianAbsoluteDeviation(f)
+}
+
+// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median
+func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) {
+	return MedianAbsoluteDeviationPopulation(f)
+}
+
+// StandardDeviation the amount of variation in the dataset
+func (f Float64Data) StandardDeviation() (float64, error) {
+	return
StandardDeviation(f) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func (f Float64Data) StandardDeviationPopulation() (float64, error) { + return StandardDeviationPopulation(f) +} + +// StandardDeviationSample finds the amount of variation from a sample +func (f Float64Data) StandardDeviationSample() (float64, error) { + return StandardDeviationSample(f) +} + +// QuartileOutliers finds the mild and extreme outliers +func (f Float64Data) QuartileOutliers() (Outliers, error) { + return QuartileOutliers(f) +} + +// Percentile finds the relative standing in a slice of floats +func (f Float64Data) Percentile(p float64) (float64, error) { + return Percentile(f, p) +} + +// PercentileNearestRank finds the relative standing using the Nearest Rank method +func (f Float64Data) PercentileNearestRank(p float64) (float64, error) { + return PercentileNearestRank(f, p) +} + +// Correlation describes the degree of relationship between two sets of data +func (f Float64Data) Correlation(d Float64Data) (float64, error) { + return Correlation(f, d) +} + +// AutoCorrelation is the correlation of a signal with a delayed copy of itself as a function of delay +func (f Float64Data) AutoCorrelation(lags int) (float64, error) { + return AutoCorrelation(f, lags) +} + +// Pearson calculates the Pearson product-moment correlation coefficient between two variables. +func (f Float64Data) Pearson(d Float64Data) (float64, error) { + return Pearson(f, d) +} + +// Quartile returns the three quartile points from a slice of data +func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) { + return Quartile(d) +} + +// InterQuartileRange finds the range between Q1 and Q3 +func (f Float64Data) InterQuartileRange() (float64, error) { + return InterQuartileRange(f) +} + +// Midhinge finds the average of the first and third quartiles +func (f Float64Data) Midhinge(d Float64Data) (float64, error) { + return Midhinge(d) +} + +// Trimean finds the average of the median and the midhinge +func (f Float64Data) Trimean(d Float64Data) (float64, error) { + return Trimean(d) +} + +// Sample returns sample from input with replacement or without +func (f Float64Data) Sample(n int, r bool) ([]float64, error) { + return Sample(f, n, r) +} + +// Variance the amount of variation in the dataset +func (f Float64Data) Variance() (float64, error) { + return Variance(f) +} + +// PopulationVariance finds the amount of variance within a population +func (f Float64Data) PopulationVariance() (float64, error) { + return PopulationVariance(f) +} + +// SampleVariance finds the amount of variance within a sample +func (f Float64Data) SampleVariance() (float64, error) { + return SampleVariance(f) +} + +// Covariance is a measure of how much two sets of data change +func (f Float64Data) Covariance(d Float64Data) (float64, error) { + return Covariance(f, d) +} + +// CovariancePopulation computes covariance for entire population between two variables +func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) { + return CovariancePopulation(f, d) +} + +// Sigmoid returns the input values along the sigmoid or s-shaped curve +func (f Float64Data) Sigmoid() ([]float64, error) { + return Sigmoid(f) +} + +// SoftMax returns the input values in the range of 0 to 1 +// with sum of all the probabilities being equal to one. 
+func (f Float64Data) SoftMax() ([]float64, error) { + return SoftMax(f) +} + +// Entropy provides calculation of the entropy +func (f Float64Data) Entropy() (float64, error) { + return Entropy(f) +} + +// Quartiles returns the three quartile points from instance of Float64Data +func (f Float64Data) Quartiles() (Quartiles, error) { + return Quartile(f) +} diff --git a/vendor/github.com/montanaflynn/stats/deviation.go b/vendor/github.com/montanaflynn/stats/deviation.go new file mode 100644 index 0000000..e69a19f --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/deviation.go @@ -0,0 +1,57 @@ +package stats + +import "math" + +// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) { + return MedianAbsoluteDeviationPopulation(input) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + i := copyslice(input) + m, _ := Median(i) + + for key, value := range i { + i[key] = math.Abs(value - m) + } + + return Median(i) +} + +// StandardDeviation the amount of variation in the dataset +func StandardDeviation(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the population variance + vp, _ := PopulationVariance(input) + + // Return the population standard deviation + return math.Sqrt(vp), nil +} + +// StandardDeviationSample finds the amount of variation from a sample +func StandardDeviationSample(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the sample variance + vs, _ := SampleVariance(input) + + // Return the sample standard deviation + return math.Sqrt(vs), nil +} diff --git a/vendor/github.com/montanaflynn/stats/distances.go b/vendor/github.com/montanaflynn/stats/distances.go new file mode 100644 index 0000000..c2b7d8f --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/distances.go @@ -0,0 +1,88 @@ +package stats + +import ( + "math" +) + +// Validate data for distance calculation +func validateData(dataPointX, dataPointY Float64Data) error { + if len(dataPointX) == 0 || len(dataPointY) == 0 { + return EmptyInputErr + } + + if len(dataPointX) != len(dataPointY) { + return SizeErr + } + return nil +} + +// ChebyshevDistance computes the Chebyshev distance between two data sets +func ChebyshevDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + var tempDistance float64 + for i := 0; i < len(dataPointY); i++ { + tempDistance = math.Abs(dataPointX[i] - dataPointY[i]) + if distance < tempDistance { + distance = tempDistance + } + } + return distance, nil +} + +// EuclideanDistance computes the Euclidean distance between two data sets +func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) { + + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { + distance = distance + 
((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i]))
+	}
+	return math.Sqrt(distance), nil
+}
+
+// ManhattanDistance computes the Manhattan distance between two data sets
+func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {
+	err = validateData(dataPointX, dataPointY)
+	if err != nil {
+		return math.NaN(), err
+	}
+	distance = 0
+	for i := 0; i < len(dataPointX); i++ {
+		distance = distance + math.Abs(dataPointX[i]-dataPointY[i])
+	}
+	return distance, nil
+}
+
+// MinkowskiDistance computes the Minkowski distance between two data sets
+//
+// Arguments:
+//	dataPointX: First set of data points
+//	dataPointY: Second set of data points. Length of both data
+//	            sets must be equal.
+//	lambda:     aka p or city blocks; with lambda = 1 the
+//	            returned distance is the Manhattan distance,
+//	            with lambda = 2 it is the Euclidean distance,
+//	            and as lambda approaches infinity it tends to
+//	            the Chebyshev distance.
+// Return:
+//	Distance or error
+func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) {
+	err = validateData(dataPointX, dataPointY)
+	if err != nil {
+		return math.NaN(), err
+	}
+	for i := 0; i < len(dataPointY); i++ {
+		distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda)
+	}
+	distance = math.Pow(distance, 1/lambda)
+	if math.IsInf(distance, 1) {
+		return math.NaN(), InfValue
+	}
+	return distance, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/doc.go b/vendor/github.com/montanaflynn/stats/doc.go
new file mode 100644
index 0000000..facb8d5
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/doc.go
@@ -0,0 +1,23 @@
+/*
+Package stats is a well tested and comprehensive
+statistics library package with no dependencies.
+
+Example Usage:
+
+	// start with some source data to use
+	data := []float64{1.0, 2.1, 3.2, 4.823, 4.1, 5.8}
+
+	// you could also use different types like this
+	// data := stats.LoadRawData([]int{1, 2, 3, 4, 5})
+	// data := stats.LoadRawData([]interface{}{1.1, "2", 3})
+	// etc...
+
+	median, _ := stats.Median(data)
+	fmt.Println(median) // 3.65
+
+	roundedMedian, _ := stats.Round(median, 0)
+	fmt.Println(roundedMedian) // 4
+
+MIT License Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com)
+*/
+package stats
diff --git a/vendor/github.com/montanaflynn/stats/entropy.go b/vendor/github.com/montanaflynn/stats/entropy.go
new file mode 100644
index 0000000..95263b0
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/entropy.go
@@ -0,0 +1,31 @@
+package stats
+
+import "math"
+
+// Entropy provides calculation of the entropy
+func Entropy(input Float64Data) (float64, error) {
+	input, err := normalize(input)
+	if err != nil {
+		return math.NaN(), err
+	}
+	var result float64
+	for i := 0; i < input.Len(); i++ {
+		v := input.Get(i)
+		if v == 0 {
+			continue
+		}
+		result += (v * math.Log(v))
+	}
+	return -result, nil
+}
+
+func normalize(input Float64Data) (Float64Data, error) {
+	sum, err := input.Sum()
+	if err != nil {
+		return Float64Data{}, err
+	}
+	for i := 0; i < input.Len(); i++ {
+		input[i] = input[i] / sum
+	}
+	return input, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/errors.go b/vendor/github.com/montanaflynn/stats/errors.go
new file mode 100644
index 0000000..95f82ff
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/errors.go
@@ -0,0 +1,35 @@
+package stats
+
+type statsError struct {
+	err string
+}
+
+func (s statsError) Error() string {
+	return s.err
+}
+
+func (s statsError) String() string {
+	return s.err
+}
+
+// These are the package-wide error values.
+// All error identification should use these values.
+// https://github.com/golang/go/wiki/Errors#naming
+var (
+	// ErrEmptyInput Input must not be empty
+	ErrEmptyInput = statsError{"Input must not be empty."}
+	// ErrNaN Not a number
+	ErrNaN = statsError{"Not a number."}
+	// ErrNegative Must not contain negative values
+	ErrNegative = statsError{"Must not contain negative values."}
+	// ErrZero Must not contain zero values
+	ErrZero = statsError{"Must not contain zero values."}
+	// ErrBounds Input is outside of range
+	ErrBounds = statsError{"Input is outside of range."}
+	// ErrSize Must be the same length
+	ErrSize = statsError{"Must be the same length."}
+	// ErrInfValue Value is infinite
+	ErrInfValue = statsError{"Value is infinite."}
+	// ErrYCoord Y Value must be greater than zero
+	ErrYCoord = statsError{"Y Value must be greater than zero."}
+)
diff --git a/vendor/github.com/montanaflynn/stats/geometric_distribution.go b/vendor/github.com/montanaflynn/stats/geometric_distribution.go
new file mode 100644
index 0000000..db785dd
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/geometric_distribution.go
@@ -0,0 +1,42 @@
+package stats
+
+import (
+	"math"
+)
+
+// ProbGeom generates the probability for a geometric random variable
+// with parameter p to achieve success in the interval of [a, b] trials
+// See https://en.wikipedia.org/wiki/Geometric_distribution for more information
+func ProbGeom(a int, b int, p float64) (prob float64, err error) {
+	if (a > b) || (a < 1) {
+		return math.NaN(), ErrBounds
+	}
+
+	prob = 0
+	q := 1 - p // probability of failure
+
+	for k := a + 1; k <= b; k++ {
+		prob = prob + p*math.Pow(q, float64(k-1))
+	}
+
+	return prob, nil
+}
+
+// ExpGeom generates the expectation or average number of trials
+// for a geometric random variable with parameter p
+func ExpGeom(p float64) (exp float64, err error) {
+	if (p > 1) || (p < 0) {
+		return math.NaN(), ErrNegative
+	}
+
+	return 1 / p, nil
+}
+
+// VarGeom generates the variance for a
+// geometric random variable with parameter p
+func VarGeom(p float64) (exp float64, err error) {
+	if (p > 1) || (p < 0) {
+		return math.NaN(), ErrNegative
+	}
+	return (1 - p) / math.Pow(p, 2), nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/legacy.go b/vendor/github.com/montanaflynn/stats/legacy.go
new file mode 100644
index 0000000..0f3d1e8
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/legacy.go
@@ -0,0 +1,49 @@
+package stats
+
+// VarP is a shortcut to PopulationVariance
+func VarP(input Float64Data) (sdev float64, err error) {
+	return PopulationVariance(input)
+}
+
+// VarS is a shortcut to SampleVariance
+func VarS(input Float64Data) (sdev float64, err error) {
+	return SampleVariance(input)
+}
+
+// StdDevP is a shortcut to StandardDeviationPopulation
+func StdDevP(input Float64Data) (sdev float64, err error) {
+	return StandardDeviationPopulation(input)
+}
+
+// StdDevS is a shortcut to StandardDeviationSample
+func StdDevS(input Float64Data) (sdev float64, err error) {
+	return StandardDeviationSample(input)
+}
+
+// LinReg is a shortcut to LinearRegression
+func LinReg(s []Coordinate) (regressions []Coordinate, err error) {
+	return LinearRegression(s)
+}
+
+// ExpReg is a shortcut to ExponentialRegression
+func ExpReg(s []Coordinate) (regressions []Coordinate, err error) {
+	return ExponentialRegression(s)
+}
+
+// LogReg is a shortcut to LogarithmicRegression
+func LogReg(s []Coordinate) (regressions []Coordinate, err error) {
+	return LogarithmicRegression(s)
+}
+
+// Legacy error names that didn't start with Err
+var (
+	EmptyInputErr = ErrEmptyInput
+	NaNErr        = ErrNaN
+	NegativeErr   = ErrNegative
+	ZeroErr       = ErrZero
+	BoundsErr     = ErrBounds
+	SizeErr       = ErrSize
+	InfValue      = ErrInfValue
+	YCoordErr     = ErrYCoord
+	EmptyInput    = ErrEmptyInput
+)
diff --git a/vendor/github.com/montanaflynn/stats/load.go b/vendor/github.com/montanaflynn/stats/load.go
new file mode 100644
index 0000000..0eb0e27
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/load.go
@@ -0,0 +1,199 @@
+package stats
+
+import (
+	"bufio"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// LoadRawData parses and converts a slice of mixed data types to floats
+func LoadRawData(raw interface{}) (f Float64Data) {
+	var r []interface{}
+	var s Float64Data
+
+	switch t := raw.(type) {
+	case []interface{}:
+		r = t
+	case []uint:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []uint8:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []uint16:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []uint32:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []uint64:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []bool:
+		for _, v := range t {
+			if v {
+				s = append(s, 1.0)
+			} else {
+				s = append(s, 0.0)
+			}
+		}
+		return s
+	case []float64:
+		return Float64Data(t)
+	case []int:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []int8:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []int16:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []int32:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []int64:
+		for _, v := range t {
+			s = append(s, float64(v))
+		}
+		return s
+	case []string:
+		for _, v := range t {
+			r = append(r, v)
+		}
+	case []time.Duration:
+		for _, v := range t {
+			r = append(r, v)
+		}
+	case map[int]int:
+		for i := 0; i < len(t); i++
{ + s = append(s, float64(t[i])) + } + return s + case map[int]int8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]string: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + case map[int]uint: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]bool: + for i := 0; i < len(t); i++ { + if t[i] { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case map[int]float64: + for i := 0; i < len(t); i++ { + s = append(s, t[i]) + } + return s + case map[int]time.Duration: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + case string: + for _, v := range strings.Fields(t) { + r = append(r, v) + } + case io.Reader: + scanner := bufio.NewScanner(t) + for scanner.Scan() { + l := scanner.Text() + for _, v := range strings.Fields(l) { + r = append(r, v) + } + } + } + + for _, v := range r { + switch t := v.(type) { + case int: + a := float64(t) + f = append(f, a) + case uint: + f = append(f, float64(t)) + case float64: + f = append(f, t) + case string: + fl, err := strconv.ParseFloat(t, 64) + if err == nil { + f = append(f, fl) + } + case bool: + if t { + f = append(f, 1.0) + } else { + f = append(f, 0.0) + } + case time.Duration: + f = append(f, float64(t)) + } + } + return f +} diff --git a/vendor/github.com/montanaflynn/stats/max.go b/vendor/github.com/montanaflynn/stats/max.go new file mode 100644 index 0000000..bb8c83c --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/max.go @@ -0,0 +1,26 @@ +package stats + +import ( + "math" +) + +// Max finds the highest number in a slice +func Max(input Float64Data) (max float64, err error) { + + // Return an error if there are no numbers + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the first value as the starting point + max = input.Get(0) + + // Loop and replace higher values + for i := 1; i < input.Len(); i++ { + if input.Get(i) > max { + max = input.Get(i) + } + } + + return max, nil +} diff --git a/vendor/github.com/montanaflynn/stats/mean.go b/vendor/github.com/montanaflynn/stats/mean.go new file mode 100644 index 0000000..a78d299 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/mean.go @@ -0,0 +1,60 @@ +package stats + +import "math" + +// Mean gets the average of a slice of numbers +func Mean(input Float64Data) (float64, error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + sum, _ := input.Sum() + + return sum / float64(input.Len()), nil +} + +// GeometricMean gets the geometric mean for a slice of numbers +func GeometricMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the product of all the numbers + var p float64 + for _, n := range input { + if p == 0 { + p = n + } else { + p *= n + } + } + 
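The `LoadRawData` switch above coerces most common Go collections (signed and unsigned ints, bools, strings, durations, int-keyed maps, and whitespace-separated text from a string or `io.Reader`) into `Float64Data`, which every other function in the package consumes. A minimal usage sketch, assuming the vendored `github.com/montanaflynn/stats` import path:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Mixed input: ints, a float, a numeric string and a bool all coerce
	// to float64; strings that fail to parse are silently dropped.
	data := stats.LoadRawData([]interface{}{1, 2.5, "3.5", true})

	mean, _ := stats.Mean(data) // (1 + 2.5 + 3.5 + 1) / 4 = 2
	maxVal, _ := stats.Max(data)
	fmt.Println(mean, maxVal) // 2 3.5
}
```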
+ // Calculate the geometric mean + return math.Pow(p, 1/float64(l)), nil +} + +// HarmonicMean gets the harmonic mean for a slice of numbers +func HarmonicMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the sum of all the numbers reciprocals and return an + // error for values that cannot be included in harmonic mean + var p float64 + for _, n := range input { + if n < 0 { + return math.NaN(), NegativeErr + } else if n == 0 { + return math.NaN(), ZeroErr + } + p += (1 / n) + } + + return float64(l) / p, nil +} diff --git a/vendor/github.com/montanaflynn/stats/median.go b/vendor/github.com/montanaflynn/stats/median.go new file mode 100644 index 0000000..a678c36 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/median.go @@ -0,0 +1,25 @@ +package stats + +import "math" + +// Median gets the median number in a slice of numbers +func Median(input Float64Data) (median float64, err error) { + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // No math is needed if there are no numbers + // For even numbers we add the two middle numbers + // and divide by two using the mean function above + // For odd numbers we just use the middle number + l := len(c) + if l == 0 { + return math.NaN(), EmptyInputErr + } else if l%2 == 0 { + median, _ = Mean(c[l/2-1 : l/2+1]) + } else { + median = c[l/2] + } + + return median, nil +} diff --git a/vendor/github.com/montanaflynn/stats/min.go b/vendor/github.com/montanaflynn/stats/min.go new file mode 100644 index 0000000..bf7e70a --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/min.go @@ -0,0 +1,26 @@ +package stats + +import "math" + +// Min finds the lowest number in a set of data +func Min(input Float64Data) (min float64, err error) { + + // Get the count of numbers in the slice + l := input.Len() + + // Return an error if there are no numbers + if l == 0 { + return math.NaN(), EmptyInputErr + } + + // Get the first value as the starting point + min = input.Get(0) + + // Iterate until done checking for a lower value + for i := 1; i < l; i++ { + if input.Get(i) < min { + min = input.Get(i) + } + } + return min, nil +} diff --git a/vendor/github.com/montanaflynn/stats/mode.go b/vendor/github.com/montanaflynn/stats/mode.go new file mode 100644 index 0000000..a7cf9f7 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/mode.go @@ -0,0 +1,47 @@ +package stats + +// Mode gets the mode [most frequent value(s)] of a slice of float64s +func Mode(input Float64Data) (mode []float64, err error) { + // Return the input if there's only one number + l := input.Len() + if l == 1 { + return input, nil + } else if l == 0 { + return nil, EmptyInputErr + } + + c := sortedCopyDif(input) + // Traverse sorted array, + // tracking the longest repeating sequence + mode = make([]float64, 5) + cnt, maxCnt := 1, 1 + for i := 1; i < l; i++ { + switch { + case c[i] == c[i-1]: + cnt++ + case cnt == maxCnt && maxCnt != 1: + mode = append(mode, c[i-1]) + cnt = 1 + case cnt > maxCnt: + mode = append(mode[:0], c[i-1]) + maxCnt, cnt = cnt, 1 + default: + cnt = 1 + } + } + switch { + case cnt == maxCnt: + mode = append(mode, c[l-1]) + case cnt > maxCnt: + mode = append(mode[:0], c[l-1]) + maxCnt = cnt + } + + // Since length must be greater than 1, + // check for slices of distinct values + if maxCnt == 1 || len(mode)*maxCnt == l && maxCnt != l { + return Float64Data{}, nil + } + + return mode, nil +} diff --git a/vendor/github.com/montanaflynn/stats/norm.go 
b/vendor/github.com/montanaflynn/stats/norm.go new file mode 100644 index 0000000..4eb8eb8 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/norm.go @@ -0,0 +1,254 @@ +package stats + +import ( + "math" + "math/rand" + "strings" + "time" +) + +// NormPpfRvs generates random variates using the Point Percentile Function. +// For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/ +func NormPpfRvs(loc float64, scale float64, size int) []float64 { + rand.Seed(time.Now().UnixNano()) + var toReturn []float64 + for i := 0; i < size; i++ { + toReturn = append(toReturn, NormPpf(rand.Float64(), loc, scale)) + } + return toReturn +} + +// NormBoxMullerRvs generates random variates using the Box–Muller transform. +// For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html +func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 { + rand.Seed(time.Now().UnixNano()) + var toReturn []float64 + for i := 0; i < int(float64(size/2)+float64(size%2)); i++ { + // u1 and u2 are uniformly distributed random numbers between 0 and 1. + u1 := rand.Float64() + u2 := rand.Float64() + // x1 and x2 are normally distributed random numbers. + x1 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Cos(2*math.Pi*u2))) + toReturn = append(toReturn, x1) + if (i+1)*2 <= size { + x2 := loc + (scale * (math.Sqrt(-2*math.Log(u1)) * math.Sin(2*math.Pi*u2))) + toReturn = append(toReturn, x2) + } + } + return toReturn +} + +// NormPdf is the probability density function. +func NormPdf(x float64, loc float64, scale float64) float64 { + return (math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi)) +} + +// NormLogPdf is the log of the probability density function. +func NormLogPdf(x float64, loc float64, scale float64) float64 { + return math.Log((math.Pow(math.E, -(math.Pow(x-loc, 2))/(2*math.Pow(scale, 2)))) / (scale * math.Sqrt(2*math.Pi))) +} + +// NormCdf is the cumulative distribution function. +func NormCdf(x float64, loc float64, scale float64) float64 { + return 0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2)))) +} + +// NormLogCdf is the log of the cumulative distribution function. +func NormLogCdf(x float64, loc float64, scale float64) float64 { + return math.Log(0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2))))) +} + +// NormSf is the survival function (also defined as 1 - cdf, but sf is sometimes more accurate). +func NormSf(x float64, loc float64, scale float64) float64 { + return 1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2)))) +} + +// NormLogSf is the log of the survival function. +func NormLogSf(x float64, loc float64, scale float64) float64 { + return math.Log(1 - 0.5*(1+math.Erf((x-loc)/(scale*math.Sqrt(2))))) +} + +// NormPpf is the point percentile function. +// This is based on Peter John Acklam's inverse normal CDF. +// algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible). 
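Both variate generators above reseed the global `math/rand` source on every call and return a plain `[]float64`. A quick sanity check, as an illustrative sketch only: draw from the Box–Muller generator and recover the parameters with `NormFit`, defined later in this file:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// 100000 draws from N(loc=5, scale=2) via the Box–Muller transform.
	draws := stats.NormBoxMullerRvs(5, 2, 100000)

	// NormFit returns {mean, standard deviation}; both should land
	// close to the requested loc and scale.
	fit := stats.NormFit(draws)
	fmt.Printf("mean≈%.2f stddev≈%.2f\n", fit[0], fit[1])
}
```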
+// For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/ +func NormPpf(p float64, loc float64, scale float64) (x float64) { + const ( + a1 = -3.969683028665376e+01 + a2 = 2.209460984245205e+02 + a3 = -2.759285104469687e+02 + a4 = 1.383577518672690e+02 + a5 = -3.066479806614716e+01 + a6 = 2.506628277459239e+00 + + b1 = -5.447609879822406e+01 + b2 = 1.615858368580409e+02 + b3 = -1.556989798598866e+02 + b4 = 6.680131188771972e+01 + b5 = -1.328068155288572e+01 + + c1 = -7.784894002430293e-03 + c2 = -3.223964580411365e-01 + c3 = -2.400758277161838e+00 + c4 = -2.549732539343734e+00 + c5 = 4.374664141464968e+00 + c6 = 2.938163982698783e+00 + + d1 = 7.784695709041462e-03 + d2 = 3.224671290700398e-01 + d3 = 2.445134137142996e+00 + d4 = 3.754408661907416e+00 + + plow = 0.02425 + phigh = 1 - plow + ) + + if p < 0 || p > 1 { + return math.NaN() + } else if p == 0 { + return -math.Inf(0) + } else if p == 1 { + return math.Inf(0) + } + + if p < plow { + q := math.Sqrt(-2 * math.Log(p)) + x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) / + ((((d1*q+d2)*q+d3)*q+d4)*q + 1) + } else if phigh < p { + q := math.Sqrt(-2 * math.Log(1-p)) + x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) / + ((((d1*q+d2)*q+d3)*q+d4)*q + 1) + } else { + q := p - 0.5 + r := q * q + x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q / + (((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1) + } + + e := 0.5*math.Erfc(-x/math.Sqrt2) - p + u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2) + x = x - u/(1+x*u/2) + + return x*scale + loc +} + +// NormIsf is the inverse survival function (inverse of sf). +func NormIsf(p float64, loc float64, scale float64) (x float64) { + if -NormPpf(p, loc, scale) == 0 { + return 0 + } + return -NormPpf(p, loc, scale) +} + +// NormMoment approximates the non-central (raw) moment of order n. +// For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution +func NormMoment(n int, loc float64, scale float64) float64 { + toReturn := 0.0 + for i := 0; i < n+1; i++ { + if (n-i)%2 == 0 { + toReturn += float64(Ncr(n, i)) * (math.Pow(loc, float64(i))) * (math.Pow(scale, float64(n-i))) * + (float64(factorial(n-i)) / ((math.Pow(2.0, float64((n-i)/2))) * + float64(factorial((n-i)/2)))) + } + } + return toReturn +} + +// NormStats returns the mean, variance, skew, and/or kurtosis. +// Mean(‘m’), variance(‘v’), skew(‘s’), and/or kurtosis(‘k’). +// Takes string containing any of 'mvsk'. +// Returns array of m v s k in that order. +func NormStats(loc float64, scale float64, moments string) []float64 { + var toReturn []float64 + if strings.ContainsAny(moments, "m") { + toReturn = append(toReturn, loc) + } + if strings.ContainsAny(moments, "v") { + toReturn = append(toReturn, math.Pow(scale, 2)) + } + if strings.ContainsAny(moments, "s") { + toReturn = append(toReturn, 0.0) + } + if strings.ContainsAny(moments, "k") { + toReturn = append(toReturn, 0.0) + } + return toReturn +} + +// NormEntropy is the differential entropy of the RV. +func NormEntropy(loc float64, scale float64) float64 { + return math.Log(scale * math.Sqrt(2*math.Pi*math.E)) +} + +// NormFit returns the maximum likelihood estimators for the Normal Distribution. +// Takes array of float64 values. +// Returns array of Mean followed by Standard Deviation. 
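Acklam's rational approximation is good to roughly 1e-9 relative error on its own; the final lines of `NormPpf` apply one Halley-style refinement against `math.Erfc`, tightening the result toward machine precision. A round-trip sketch through `NormCdf` makes the inverse relationship concrete:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	for _, p := range []float64{0.001, 0.025, 0.5, 0.975} {
		x := stats.NormPpf(p, 0, 1)    // standard-normal quantile
		back := stats.NormCdf(x, 0, 1) // should reproduce p
		fmt.Printf("p=%.3f  x=%+.6f  cdf(x)=%.6f\n", p, x, back)
	}
}
```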
+func NormFit(data []float64) [2]float64 { + sum := 0.00 + for i := 0; i < len(data); i++ { + sum += data[i] + } + mean := sum / float64(len(data)) + stdNumerator := 0.00 + for i := 0; i < len(data); i++ { + stdNumerator += math.Pow(data[i]-mean, 2) + } + return [2]float64{mean, math.Sqrt((stdNumerator) / (float64(len(data))))} +} + +// NormMedian is the median of the distribution. +func NormMedian(loc float64, scale float64) float64 { + return loc +} + +// NormMean is the mean/expected value of the distribution. +func NormMean(loc float64, scale float64) float64 { + return loc +} + +// NormVar is the variance of the distribution. +func NormVar(loc float64, scale float64) float64 { + return math.Pow(scale, 2) +} + +// NormStd is the standard deviation of the distribution. +func NormStd(loc float64, scale float64) float64 { + return scale +} + +// NormInterval finds endpoints of the range that contains alpha percent of the distribution. +func NormInterval(alpha float64, loc float64, scale float64) [2]float64 { + q1 := (1.0 - alpha) / 2 + q2 := (1.0 + alpha) / 2 + a := NormPpf(q1, loc, scale) + b := NormPpf(q2, loc, scale) + return [2]float64{a, b} +} + +// factorial is the naive factorial algorithm. +func factorial(x int) int { + if x == 0 { + return 1 + } + return x * factorial(x-1) +} + +// Ncr is an N choose R algorithm. +// Aaron Cannon's algorithm. +func Ncr(n, r int) int { + if n <= 1 || r == 0 || n == r { + return 1 + } + if newR := n - r; newR < r { + r = newR + } + if r == 1 { + return n + } + ret := int(n - r + 1) + for i, j := ret+1, int(2); j <= r; i, j = i+1, j+1 { + ret = ret * i / j + } + return ret +} diff --git a/vendor/github.com/montanaflynn/stats/outlier.go b/vendor/github.com/montanaflynn/stats/outlier.go new file mode 100644 index 0000000..7c9795b --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/outlier.go @@ -0,0 +1,44 @@ +package stats + +// Outliers holds mild and extreme outliers found in data +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +// QuartileOutliers finds the mild and extreme outliers +func QuartileOutliers(input Float64Data) (Outliers, error) { + if input.Len() == 0 { + return Outliers{}, EmptyInputErr + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Calculate the quartiles and interquartile range + qs, _ := Quartile(copy) + iqr, _ := InterQuartileRange(copy) + + // Calculate the lower and upper inner and outer fences + lif := qs.Q1 - (1.5 * iqr) + uif := qs.Q3 + (1.5 * iqr) + lof := qs.Q1 - (3 * iqr) + uof := qs.Q3 + (3 * iqr) + + // Find the data points that are outside of the + // inner and upper fences and add them to mild + // and extreme outlier slices + var mild Float64Data + var extreme Float64Data + for _, v := range copy { + + if v < lof || v > uof { + extreme = append(extreme, v) + } else if v < lif || v > uif { + mild = append(mild, v) + } + } + + // Wrap them into our struct + return Outliers{mild, extreme}, nil +} diff --git a/vendor/github.com/montanaflynn/stats/percentile.go b/vendor/github.com/montanaflynn/stats/percentile.go new file mode 100644 index 0000000..f564178 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/percentile.go @@ -0,0 +1,86 @@ +package stats + +import ( + "math" +) + +// Percentile finds the relative standing in a slice of floats +func Percentile(input Float64Data, percent float64) (percentile float64, err error) { + length := input.Len() + if length == 0 { + return math.NaN(), EmptyInputErr + } + + if length == 1 { + return input[0], nil + } + 
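The fences in `QuartileOutliers` are Tukey's: mild outliers sit beyond Q1/Q3 by more than 1.5 times the IQR, extreme ones by more than 3 times, with the quartiles computed by this package's median-of-halves method. A worked sketch on made-up data:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Q1=4, Q3=9, IQR=5, so the inner fences are [-3.5, 16.5]
	// and the outer fences are [-11, 24].
	data := stats.Float64Data{2, 3, 4, 5, 6, 7, 8, 9, 20, 100}

	out, err := stats.QuartileOutliers(data)
	if err != nil {
		panic(err)
	}
	fmt.Println("mild:", out.Mild, "extreme:", out.Extreme) // mild: [20] extreme: [100]
}
```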
+ if percent <= 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Multiply percent by length of input + index := (percent / 100) * float64(len(c)) + + // Check if the index is a whole number + if index == float64(int64(index)) { + + // Convert float to int + i := int(index) + + // Find the value at the index + percentile = c[i-1] + + } else if index > 1 { + + // Convert float to int via truncation + i := int(index) + + // Find the average of the index and following values + percentile, _ = Mean(Float64Data{c[i-1], c[i]}) + + } else { + return math.NaN(), BoundsErr + } + + return percentile, nil + +} + +// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) { + + // Find the length of items in the slice + il := input.Len() + + // Return an error for empty slices + if il == 0 { + return math.NaN(), EmptyInputErr + } + + // Return error for less than 0 or greater than 100 percentages + if percent < 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Return the last item + if percent == 100.0 { + return c[il-1], nil + } + + // Find ordinal ranking + or := int(math.Ceil(float64(il) * percent / 100)) + + // Return the item that is in the place of the ordinal rank + if or == 0 { + return c[0], nil + } + return c[or-1], nil + +} diff --git a/vendor/github.com/montanaflynn/stats/quartile.go b/vendor/github.com/montanaflynn/stats/quartile.go new file mode 100644 index 0000000..40bbf6e --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/quartile.go @@ -0,0 +1,74 @@ +package stats + +import "math" + +// Quartiles holds the three quartile points +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +// Quartile returns the three quartile points from a slice of data +func Quartile(input Float64Data) (Quartiles, error) { + + il := input.Len() + if il == 0 { + return Quartiles{}, EmptyInputErr + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Find the cutoff places depeding on if + // the input slice length is even or odd + var c1 int + var c2 int + if il%2 == 0 { + c1 = il / 2 + c2 = il / 2 + } else { + c1 = (il - 1) / 2 + c2 = c1 + 1 + } + + // Find the Medians with the cutoff points + Q1, _ := Median(copy[:c1]) + Q2, _ := Median(copy) + Q3, _ := Median(copy[c2:]) + + return Quartiles{Q1, Q2, Q3}, nil + +} + +// InterQuartileRange finds the range between Q1 and Q3 +func InterQuartileRange(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + qs, _ := Quartile(input) + iqr := qs.Q3 - qs.Q1 + return iqr, nil +} + +// Midhinge finds the average of the first and third quartiles +func Midhinge(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + qs, _ := Quartile(input) + mh := (qs.Q1 + qs.Q3) / 2 + return mh, nil +} + +// Trimean finds the average of the median and the midhinge +func Trimean(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + c := sortedCopy(input) + q, _ := Quartile(c) + + return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil +} diff --git a/vendor/github.com/montanaflynn/stats/ranksum.go b/vendor/github.com/montanaflynn/stats/ranksum.go new file mode 100644 index 0000000..fc424ef --- /dev/null +++ 
b/vendor/github.com/montanaflynn/stats/ranksum.go @@ -0,0 +1,183 @@ +package stats + +// import "math" +// +// // WilcoxonRankSum tests the null hypothesis that two sets +// // of data are drawn from the same distribution. It does +// // not handle ties between measurements in x and y. +// // +// // Parameters: +// // data1 Float64Data: First set of data points. +// // data2 Float64Data: Second set of data points. +// // Length of both data samples must be equal. +// // +// // Return: +// // statistic float64: The test statistic under the +// // large-sample approximation that the +// // rank sum statistic is normally distributed. +// // pvalue float64: The two-sided p-value of the test +// // err error: Any error from the input data parameters +// // +// // https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test +// func WilcoxonRankSum(data1, data2 Float64Data) (float64, float64, error) { +// +// l1 := data1.Len() +// l2 := data2.Len() +// +// if l1 == 0 || l2 == 0 { +// return math.NaN(), math.NaN(), EmptyInputErr +// } +// +// if l1 != l2 { +// return math.NaN(), math.NaN(), SizeErr +// } +// +// alldata := Float64Data{} +// alldata = append(alldata, data1...) +// alldata = append(alldata, data2...) +// +// // ranked := +// +// return 0.0, 0.0, nil +// } +// +// // x, y = map(np.asarray, (x, y)) +// // n1 = len(x) +// // n2 = len(y) +// // alldata = np.concatenate((x, y)) +// // ranked = rankdata(alldata) +// // x = ranked[:n1] +// // s = np.sum(x, axis=0) +// // expected = n1 * (n1+n2+1) / 2.0 +// // z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0) +// // prob = 2 * distributions.norm.sf(abs(z)) +// // +// // return RanksumsResult(z, prob) +// +// // def rankdata(a, method='average'): +// // """ +// // Assign ranks to data, dealing with ties appropriately. +// // Ranks begin at 1. The `method` argument controls how ranks are assigned +// // to equal values. See [1]_ for further discussion of ranking methods. +// // Parameters +// // ---------- +// // a : array_like +// // The array of values to be ranked. The array is first flattened. +// // method : str, optional +// // The method used to assign ranks to tied elements. +// // The options are 'average', 'min', 'max', 'dense' and 'ordinal'. +// // 'average': +// // The average of the ranks that would have been assigned to +// // all the tied values is assigned to each value. +// // 'min': +// // The minimum of the ranks that would have been assigned to all +// // the tied values is assigned to each value. (This is also +// // referred to as "competition" ranking.) +// // 'max': +// // The maximum of the ranks that would have been assigned to all +// // the tied values is assigned to each value. +// // 'dense': +// // Like 'min', but the rank of the next highest element is assigned +// // the rank immediately after those assigned to the tied elements. +// // 'ordinal': +// // All values are given a distinct rank, corresponding to the order +// // that the values occur in `a`. +// // The default is 'average'. +// // Returns +// // ------- +// // ranks : ndarray +// // An array of length equal to the size of `a`, containing rank +// // scores. +// // References +// // ---------- +// // .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking +// // Examples +// // -------- +// // >>> from scipy.stats import rankdata +// // >>> rankdata([0, 2, 3, 2]) +// // array([ 1. , 2.5, 4. 
, 2.5]) +// // """ +// // +// // arr = np.ravel(np.asarray(a)) +// // algo = 'quicksort' +// // sorter = np.argsort(arr, kind=algo) +// // +// // inv = np.empty(sorter.size, dtype=np.intp) +// // inv[sorter] = np.arange(sorter.size, dtype=np.intp) +// // +// // +// // arr = arr[sorter] +// // obs = np.r_[True, arr[1:] != arr[:-1]] +// // dense = obs.cumsum()[inv] +// // +// // +// // # cumulative counts of each unique value +// // count = np.r_[np.nonzero(obs)[0], len(obs)] +// // +// // # average method +// // return .5 * (count[dense] + count[dense - 1] + 1) +// +// type rankable interface { +// Len() int +// RankEqual(int, int) bool +// } +// +// func StandardRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// var k int +// for i := range r { +// if i == 0 || !d.RankEqual(i, i-1) { +// k = i + 1 +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func ModifiedRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := range r { +// k := i + 1 +// for j := i + 1; j < len(r) && d.RankEqual(i, j); j++ { +// k = j + 1 +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func DenseRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// var k int +// for i := range r { +// if i == 0 || !d.RankEqual(i, i-1) { +// k++ +// } +// r[i] = float64(k) +// } +// return r +// } +// +// func OrdinalRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := range r { +// r[i] = float64(i + 1) +// } +// return r +// } +// +// func FractionalRank(d rankable) []float64 { +// r := make([]float64, d.Len()) +// for i := 0; i < len(r); { +// var j int +// f := float64(i + 1) +// for j = i + 1; j < len(r) && d.RankEqual(i, j); j++ { +// f += float64(j + 1) +// } +// f /= float64(j - i) +// for ; i < j; i++ { +// r[i] = f +// } +// } +// return r +// } diff --git a/vendor/github.com/montanaflynn/stats/regression.go b/vendor/github.com/montanaflynn/stats/regression.go new file mode 100644 index 0000000..401d951 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/regression.go @@ -0,0 +1,113 @@ +package stats + +import "math" + +// Series is a container for a series of data +type Series []Coordinate + +// Coordinate holds the data in a series +type Coordinate struct { + X, Y float64 +} + +// LinearRegression finds the least squares linear regression on data series +func LinearRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + // Placeholder for the math to be done + var sum [5]float64 + + // Loop over data keeping index in place + i := 0 + for ; i < len(s); i++ { + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X + sum[3] += s[i].X * s[i].Y + sum[4] += s[i].Y * s[i].Y + } + + // Find gradient and intercept + f := float64(i) + gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0]) + intercept := (sum[1] / f) - (gradient * sum[0] / f) + + // Create the new regression series + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: s[j].X*gradient + intercept, + }) + } + + return regressions, nil +} + +// ExponentialRegression returns an exponential regression on data series +func ExponentialRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + var sum [6]float64 + + for i := 0; i < len(s); i++ { + if s[i].Y < 0 { + return nil, YCoordErr + } + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X * s[i].Y + sum[3] += s[i].Y * math.Log(s[i].Y) + 
sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y) + sum[5] += s[i].X * s[i].Y + } + + denominator := (sum[1]*sum[2] - sum[5]*sum[5]) + a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator) + b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: a * math.Exp(b*s[j].X), + }) + } + + return regressions, nil +} + +// LogarithmicRegression returns an logarithmic regression on data series +func LogarithmicRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInputErr + } + + var sum [4]float64 + + i := 0 + for ; i < len(s); i++ { + sum[0] += math.Log(s[i].X) + sum[1] += s[i].Y * math.Log(s[i].X) + sum[2] += s[i].Y + sum[3] += math.Pow(math.Log(s[i].X), 2) + } + + f := float64(i) + a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0]) + b := (sum[2] - a*sum[0]) / f + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: b + a*math.Log(s[j].X), + }) + } + + return regressions, nil +} diff --git a/vendor/github.com/montanaflynn/stats/round.go b/vendor/github.com/montanaflynn/stats/round.go new file mode 100644 index 0000000..b66779c --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/round.go @@ -0,0 +1,38 @@ +package stats + +import "math" + +// Round a float to a specific decimal place or precision +func Round(input float64, places int) (rounded float64, err error) { + + // If the float is not a number + if math.IsNaN(input) { + return math.NaN(), NaNErr + } + + // Find out the actual sign and correct the input for later + sign := 1.0 + if input < 0 { + sign = -1 + input *= -1 + } + + // Use the places arg to get the amount of precision wanted + precision := math.Pow(10, float64(places)) + + // Find the decimal place we are looking to round + digit := input * precision + + // Get the actual decimal number as a fraction to be compared + _, decimal := math.Modf(digit) + + // If the decimal is less than .5 we round down otherwise up + if decimal >= 0.5 { + rounded = math.Ceil(digit) + } else { + rounded = math.Floor(digit) + } + + // Finally we do the math to actually create a rounded number + return rounded / precision * sign, nil +} diff --git a/vendor/github.com/montanaflynn/stats/sample.go b/vendor/github.com/montanaflynn/stats/sample.go new file mode 100644 index 0000000..40166af --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sample.go @@ -0,0 +1,76 @@ +package stats + +import ( + "math/rand" + "sort" +) + +// Sample returns sample from input with replacement or without +func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) { + + if input.Len() == 0 { + return nil, EmptyInputErr + } + + length := input.Len() + if replacement { + + result := Float64Data{} + rand.Seed(unixnano()) + + // In every step, randomly take the num for + for i := 0; i < takenum; i++ { + idx := rand.Intn(length) + result = append(result, input[idx]) + } + + return result, nil + + } else if !replacement && takenum <= length { + + rand.Seed(unixnano()) + + // Get permutation of number of indexies + perm := rand.Perm(length) + result := Float64Data{} + + // Get element of input by permutated index + for _, idx := range perm[0:takenum] { + result = append(result, input[idx]) + } + + return result, nil + + } + + return nil, BoundsErr +} + +// StableSample like stable sort, it returns samples from input while keeps the order of original data. 
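`Sample` draws with replacement (duplicates allowed) or without (a random subset in random order), while `StableSample`, whose body follows, also draws without replacement but sorts the chosen indices first so the result keeps the input's ordering. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	data := stats.Float64Data{10, 20, 30, 40, 50}

	withRepl, _ := stats.Sample(data, 3, true) // values may repeat
	noRepl, _ := stats.Sample(data, 3, false)  // distinct values, shuffled order
	stable, _ := stats.StableSample(data, 3)   // distinct values, original order

	fmt.Println(withRepl, noRepl, stable)
}
```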
+func StableSample(input Float64Data, takenum int) ([]float64, error) { + if input.Len() == 0 { + return nil, EmptyInputErr + } + + length := input.Len() + + if takenum <= length { + + rand.Seed(unixnano()) + + perm := rand.Perm(length) + perm = perm[0:takenum] + // Sort perm before applying + sort.Ints(perm) + result := Float64Data{} + + for _, idx := range perm { + result = append(result, input[idx]) + } + + return result, nil + + } + + return nil, BoundsErr +} diff --git a/vendor/github.com/montanaflynn/stats/sigmoid.go b/vendor/github.com/montanaflynn/stats/sigmoid.go new file mode 100644 index 0000000..5f2559d --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sigmoid.go @@ -0,0 +1,18 @@ +package stats + +import "math" + +// Sigmoid returns the input values in the range of -1 to 1 +// along the sigmoid or s-shaped curve, commonly used in +// machine learning while training neural networks as an +// activation function. +func Sigmoid(input Float64Data) ([]float64, error) { + if input.Len() == 0 { + return Float64Data{}, EmptyInput + } + s := make([]float64, len(input)) + for i, v := range input { + s[i] = 1 / (1 + math.Exp(-v)) + } + return s, nil +} diff --git a/vendor/github.com/montanaflynn/stats/softmax.go b/vendor/github.com/montanaflynn/stats/softmax.go new file mode 100644 index 0000000..8507264 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/softmax.go @@ -0,0 +1,25 @@ +package stats + +import "math" + +// SoftMax returns the input values in the range of 0 to 1 +// with sum of all the probabilities being equal to one. It +// is commonly used in machine learning neural networks. +func SoftMax(input Float64Data) ([]float64, error) { + if input.Len() == 0 { + return Float64Data{}, EmptyInput + } + + s := 0.0 + c, _ := Max(input) + for _, e := range input { + s += math.Exp(e - c) + } + + sm := make([]float64, len(input)) + for i, v := range input { + sm[i] = math.Exp(v-c) / s + } + + return sm, nil +} diff --git a/vendor/github.com/montanaflynn/stats/sum.go b/vendor/github.com/montanaflynn/stats/sum.go new file mode 100644 index 0000000..15b611d --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/sum.go @@ -0,0 +1,18 @@ +package stats + +import "math" + +// Sum adds all the numbers of a slice together +func Sum(input Float64Data) (sum float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Add em up + for _, n := range input { + sum += n + } + + return sum, nil +} diff --git a/vendor/github.com/montanaflynn/stats/util.go b/vendor/github.com/montanaflynn/stats/util.go new file mode 100644 index 0000000..8819976 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/util.go @@ -0,0 +1,43 @@ +package stats + +import ( + "sort" + "time" +) + +// float64ToInt rounds a float64 to an int +func float64ToInt(input float64) (output int) { + r, _ := Round(input, 0) + return int(r) +} + +// unixnano returns nanoseconds from UTC epoch +func unixnano() int64 { + return time.Now().UTC().UnixNano() +} + +// copyslice copies a slice of float64s +func copyslice(input Float64Data) Float64Data { + s := make(Float64Data, input.Len()) + copy(s, input) + return s +} + +// sortedCopy returns a sorted copy of float64s +func sortedCopy(input Float64Data) (copy Float64Data) { + copy = copyslice(input) + sort.Float64s(copy) + return +} + +// sortedCopyDif returns a sorted copy of float64s +// only if the original data isn't sorted. +// Only use this if returned slice won't be manipulated! 
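One detail worth noting: the logistic sigmoid maps each input into the open interval (0, 1). `SoftMax` subtracts the input maximum before exponentiating, the usual guard against overflow on large inputs, which leaves the resulting probabilities unchanged. A sketch showing both, with the softmax output summing to one:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Large enough to overflow a naive exp-then-normalize softmax.
	in := stats.Float64Data{1000, 1001, 1002}

	sm, _ := stats.SoftMax(in)
	sig, _ := stats.Sigmoid(stats.Float64Data{-2, 0, 2})

	var sum float64
	for _, v := range sm {
		sum += v
	}
	fmt.Println(sm, sum) // finite probabilities summing to 1
	fmt.Println(sig)     // values strictly inside (0, 1)
}
```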
+func sortedCopyDif(input Float64Data) (copy Float64Data) { + if sort.Float64sAreSorted(input) { + return input + } + copy = copyslice(input) + sort.Float64s(copy) + return +} diff --git a/vendor/github.com/montanaflynn/stats/variance.go b/vendor/github.com/montanaflynn/stats/variance.go new file mode 100644 index 0000000..a644569 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/variance.go @@ -0,0 +1,105 @@ +package stats + +import "math" + +// _variance finds the variance for both population and sample data +func _variance(input Float64Data, sample int) (variance float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInputErr + } + + // Sum the square of the mean subtracted from each number + m, _ := Mean(input) + + for _, n := range input { + variance += (n - m) * (n - m) + } + + // When getting the mean of the squared differences + // "sample" will allow us to know if it's a sample + // or population and wether to subtract by one or not + return variance / float64((input.Len() - (1 * sample))), nil +} + +// Variance the amount of variation in the dataset +func Variance(input Float64Data) (sdev float64, err error) { + return PopulationVariance(input) +} + +// PopulationVariance finds the amount of variance within a population +func PopulationVariance(input Float64Data) (pvar float64, err error) { + + v, err := _variance(input, 0) + if err != nil { + return math.NaN(), err + } + + return v, nil +} + +// SampleVariance finds the amount of variance within a sample +func SampleVariance(input Float64Data) (svar float64, err error) { + + v, err := _variance(input, 1) + if err != nil { + return math.NaN(), err + } + + return v, nil +} + +// Covariance is a measure of how much two sets of data change +func Covariance(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInputErr + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + m1, _ := Mean(data1) + m2, _ := Mean(data2) + + // Calculate sum of squares + var ss float64 + for i := 0; i < l1; i++ { + delta1 := (data1.Get(i) - m1) + delta2 := (data2.Get(i) - m2) + ss += (delta1*delta2 - ss) / float64(i+1) + } + + return ss * float64(l1) / float64(l1-1), nil +} + +// CovariancePopulation computes covariance for entire population between two variables. 
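`Covariance` accumulates an incremental mean of the products of deviations and then applies the n-1 sample correction, while `CovariancePopulation`, below, divides by n. On exactly linear data the two differ only by that factor:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	x := stats.Float64Data{1, 2, 3, 4}
	y := stats.Float64Data{2, 4, 6, 8} // y = 2x

	sample, _ := stats.Covariance(x, y)        // sum of products / (n-1) = 10/3
	pop, _ := stats.CovariancePopulation(x, y) // sum of products / n     = 2.5
	fmt.Println(sample, pop)
}
```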
+func CovariancePopulation(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInputErr + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + m1, _ := Mean(data1) + m2, _ := Mean(data2) + + var s float64 + for i := 0; i < l1; i++ { + delta1 := (data1.Get(i) - m1) + delta2 := (data2.Get(i) - m2) + s += delta1 * delta2 + } + + return s / float64(l1), nil +} diff --git a/vendor/github.com/power-devops/perfstat/config.go b/vendor/github.com/power-devops/perfstat/config.go index de7230d..a6df39c 100644 --- a/vendor/github.com/power-devops/perfstat/config.go +++ b/vendor/github.com/power-devops/perfstat/config.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/cpustat.go b/vendor/github.com/power-devops/perfstat/cpustat.go index 902727f..d456e68 100644 --- a/vendor/github.com/power-devops/perfstat/cpustat.go +++ b/vendor/github.com/power-devops/perfstat/cpustat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -20,6 +21,13 @@ import ( "unsafe" ) +var old_cpu_total_stat *C.perfstat_cpu_total_t + +func init() { + old_cpu_total_stat = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t)) + C.perfstat_cpu_total(nil, old_cpu_total_stat, C.sizeof_perfstat_cpu_total_t, 1) +} + func CpuStat() ([]CPU, error) { var cpustat *C.perfstat_cpu_t var cpu C.perfstat_id_t @@ -96,3 +104,36 @@ func CpuUtilStat(intvl time.Duration) (*CPUUtil, error) { u := perfstatcpuutil2cpuutil(cpuutil) return &u, nil } + +func CpuUtilTotalStat() (*CPUUtil, error) { + var cpuutil *C.perfstat_cpu_util_t + var new_cpu_total_stat *C.perfstat_cpu_total_t + var data C.perfstat_rawdata_t + + new_cpu_total_stat = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t)) + cpuutil = (*C.perfstat_cpu_util_t)(C.malloc(C.sizeof_perfstat_cpu_util_t)) + defer C.free(unsafe.Pointer(cpuutil)) + + r := C.perfstat_cpu_total(nil, new_cpu_total_stat, C.sizeof_perfstat_cpu_total_t, 1) + if r <= 0 { + C.free(unsafe.Pointer(new_cpu_total_stat)) + return nil, fmt.Errorf("error perfstat_cpu_total()") + } + + data._type = C.UTIL_CPU_TOTAL + data.curstat = unsafe.Pointer(new_cpu_total_stat) + data.prevstat = unsafe.Pointer(old_cpu_total_stat) + data.sizeof_data = C.sizeof_perfstat_cpu_total_t + data.cur_elems = 1 + data.prev_elems = 1 + + r = C.perfstat_cpu_util(&data, cpuutil, C.sizeof_perfstat_cpu_util_t, 1) + C.free(unsafe.Pointer(old_cpu_total_stat)) + old_cpu_total_stat = new_cpu_total_stat + if r <= 0 { + return nil, fmt.Errorf("error perfstat_cpu_util()") + } + u := perfstatcpuutil2cpuutil(cpuutil) + return &u, nil +} + diff --git a/vendor/github.com/power-devops/perfstat/diskstat.go b/vendor/github.com/power-devops/perfstat/diskstat.go index fc70dfa..06763b4 100644 --- a/vendor/github.com/power-devops/perfstat/diskstat.go +++ b/vendor/github.com/power-devops/perfstat/diskstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/doc.go b/vendor/github.com/power-devops/perfstat/doc.go index 85eaf3e..a0439c5 100644 --- a/vendor/github.com/power-devops/perfstat/doc.go +++ b/vendor/github.com/power-devops/perfstat/doc.go @@ -1,3 +1,4 @@ +//go:build !aix // +build !aix // Copyright 2020 Power-Devops.com. All rights reserved. 
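The new `CpuUtilTotalStat` in `cpustat.go` above works on deltas: the added `init` takes an initial `perfstat_cpu_total` snapshot, and each call compares a fresh snapshot against the stored one before swapping it in, so the returned percentages cover the interval since the previous call (or since package load on the first call). A minimal caller sketch, assuming an AIX host with cgo enabled:

```go
//go:build aix && cgo

package main

import (
	"fmt"
	"time"

	"github.com/power-devops/perfstat"
)

func main() {
	for i := 0; i < 3; i++ {
		time.Sleep(5 * time.Second) // let CPU time accumulate between snapshots
		u, err := perfstat.CpuUtilTotalStat()
		if err != nil {
			fmt.Println("perfstat:", err)
			return
		}
		// Percentages are relative to the interval since the last call.
		fmt.Printf("user=%.1f%% sys=%.1f%% wait=%.1f%% idle=%.1f%%\n",
			u.UserPct, u.KernPct, u.WaitPct, u.IdlePct)
	}
}
```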
diff --git a/vendor/github.com/power-devops/perfstat/fsstat.go b/vendor/github.com/power-devops/perfstat/fsstat.go index 27f4c06..d391319 100644 --- a/vendor/github.com/power-devops/perfstat/fsstat.go +++ b/vendor/github.com/power-devops/perfstat/fsstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/helpers.go b/vendor/github.com/power-devops/perfstat/helpers.go index e8d6997..654cdcf 100644 --- a/vendor/github.com/power-devops/perfstat/helpers.go +++ b/vendor/github.com/power-devops/perfstat/helpers.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -754,7 +755,7 @@ func fsinfo2filesystem(n *C.struct_fsinfo) FileSystem { i.Device = C.GoString(n.devname) i.MountPoint = C.GoString(n.fsname) i.FSType = int(n.fstype) - i.Flags = int(n.flags) + i.Flags = uint(n.flags) i.TotalBlocks = int64(n.totalblks) i.FreeBlocks = int64(n.freeblks) i.TotalInodes = int64(n.totalinodes) diff --git a/vendor/github.com/power-devops/perfstat/lparstat.go b/vendor/github.com/power-devops/perfstat/lparstat.go index 0ce35e3..06f79fd 100644 --- a/vendor/github.com/power-devops/perfstat/lparstat.go +++ b/vendor/github.com/power-devops/perfstat/lparstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/lvmstat.go b/vendor/github.com/power-devops/perfstat/lvmstat.go index eb2064c..2ce9908 100644 --- a/vendor/github.com/power-devops/perfstat/lvmstat.go +++ b/vendor/github.com/power-devops/perfstat/lvmstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/memstat.go b/vendor/github.com/power-devops/perfstat/memstat.go index d211a73..52133f0 100644 --- a/vendor/github.com/power-devops/perfstat/memstat.go +++ b/vendor/github.com/power-devops/perfstat/memstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/netstat.go b/vendor/github.com/power-devops/perfstat/netstat.go index 4070da2..847d294 100644 --- a/vendor/github.com/power-devops/perfstat/netstat.go +++ b/vendor/github.com/power-devops/perfstat/netstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/procstat.go b/vendor/github.com/power-devops/perfstat/procstat.go index ecafebd..957ec2b 100644 --- a/vendor/github.com/power-devops/perfstat/procstat.go +++ b/vendor/github.com/power-devops/perfstat/procstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/sysconf.go b/vendor/github.com/power-devops/perfstat/sysconf.go index c7454d0..b557da0 100644 --- a/vendor/github.com/power-devops/perfstat/sysconf.go +++ b/vendor/github.com/power-devops/perfstat/sysconf.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/systemcfg.go b/vendor/github.com/power-devops/perfstat/systemcfg.go index 6287eb4..7f9277b 100644 --- a/vendor/github.com/power-devops/perfstat/systemcfg.go +++ b/vendor/github.com/power-devops/perfstat/systemcfg.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/types_fs.go b/vendor/github.com/power-devops/perfstat/types_fs.go index 0be048a..b4b43ac 100644 --- a/vendor/github.com/power-devops/perfstat/types_fs.go +++ b/vendor/github.com/power-devops/perfstat/types_fs.go @@ -8,7 +8,7 @@ type FileSystem 
struct { Device string /* name of the mounted device */ MountPoint string /* where the device is mounted */ FSType int /* File system type, see the constants below */ - Flags int /* Flags of the file system */ + Flags uint /* Flags of the file system */ TotalBlocks int64 /* number of 512 bytes blocks in the filesystem */ FreeBlocks int64 /* number of free 512 bytes block in the filesystem */ TotalInodes int64 /* total number of inodes in the filesystem */ diff --git a/vendor/github.com/power-devops/perfstat/uptime.go b/vendor/github.com/power-devops/perfstat/uptime.go index 2bd3e56..8608787 100644 --- a/vendor/github.com/power-devops/perfstat/uptime.go +++ b/vendor/github.com/power-devops/perfstat/uptime.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go index 411a063..83bc23d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "math" + "runtime" "strconv" "strings" "sync" @@ -86,10 +87,12 @@ func (c TimesStat) String() string { return `{` + strings.Join(v, ",") + `}` } -// Total returns the total number of seconds in a CPUTimesStat +// Deprecated: Total returns the total number of seconds in a CPUTimesStat +// Please do not use this internal function. func (c TimesStat) Total() float64 { - total := c.User + c.System + c.Nice + c.Iowait + c.Irq + c.Softirq + - c.Steal + c.Idle + total := c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq + + c.Softirq + c.Steal + c.Guest + c.GuestNice + return total } @@ -99,9 +102,15 @@ func (c InfoStat) String() string { } func getAllBusy(t TimesStat) (float64, float64) { - busy := t.User + t.System + t.Nice + t.Iowait + t.Irq + - t.Softirq + t.Steal - return busy + t.Idle, busy + tot := t.Total() + if runtime.GOOS == "linux" { + tot -= t.Guest // Linux 2.6.24+ + tot -= t.GuestNice // Linux 3.2.0+ + } + + busy := tot - t.Idle - t.Iowait + + return tot, busy } func calculateBusy(t1, t2 TimesStat) float64 { @@ -142,11 +151,11 @@ func Percent(interval time.Duration, percpu bool) ([]float64, error) { func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool) ([]float64, error) { if interval <= 0 { - return percentUsedFromLastCall(percpu) + return percentUsedFromLastCallWithContext(ctx, percpu) } // Get CPU usage at the start of the interval. - cpuTimes1, err := Times(percpu) + cpuTimes1, err := TimesWithContext(ctx, percpu) if err != nil { return nil, err } @@ -156,7 +165,7 @@ func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool } // And at the end of the interval. 
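Linux folds guest time into `user` (2.6.24+) and guest_nice into `nice` (3.2.0+), so the reworked `getAllBusy` subtracts `Guest` and `GuestNice` from the now-deprecated `Total()` sum to avoid double counting, and treats iowait as idle-like when deriving busy time. A sketch of the arithmetic between two snapshots, not the library's internal code, with field names borrowed from `TimesStat`:

```go
package main

import "fmt"

// snapshot mirrors the TimesStat fields used here (illustrative only).
type snapshot struct {
	User, System, Nice, Iowait, Irq, Softirq, Steal, Idle, Guest, GuestNice float64
}

func totalAndBusy(t snapshot) (tot, busy float64) {
	tot = t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq +
		t.Steal + t.Idle + t.Guest + t.GuestNice
	tot -= t.Guest + t.GuestNice // Linux already counts guest inside user/nice
	busy = tot - t.Idle - t.Iowait
	return tot, busy
}

func main() {
	t1 := snapshot{User: 100, System: 50, Iowait: 20, Idle: 800, Guest: 10}
	t2 := snapshot{User: 160, System: 70, Iowait: 25, Idle: 880, Guest: 15}

	tot1, busy1 := totalAndBusy(t1)
	tot2, busy2 := totalAndBusy(t2)
	fmt.Printf("busy over interval: %.1f%%\n", (busy2-busy1)/(tot2-tot1)*100)
}
```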
- cpuTimes2, err := Times(percpu) + cpuTimes2, err := TimesWithContext(ctx, percpu) if err != nil { return nil, err } @@ -165,7 +174,11 @@ func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool } func percentUsedFromLastCall(percpu bool) ([]float64, error) { - cpuTimes, err := Times(percpu) + return percentUsedFromLastCallWithContext(context.Background(), percpu) +} + +func percentUsedFromLastCallWithContext(ctx context.Context, percpu bool) ([]float64, error) { + cpuTimes, err := TimesWithContext(ctx, percpu) if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go index 8f35bdc..1439d1d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go @@ -5,70 +5,12 @@ package cpu import ( "context" - - "github.com/power-devops/perfstat" ) func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - var ret []TimesStat - if percpu { - cpus, err := perfstat.CpuStat() - if err != nil { - return nil, err - } - for _, c := range cpus { - ct := &TimesStat{ - CPU: c.Name, - Idle: float64(c.Idle), - User: float64(c.User), - System: float64(c.Sys), - Iowait: float64(c.Wait), - } - ret = append(ret, *ct) - } - } else { - c, err := perfstat.CpuUtilTotalStat() - if err != nil { - return nil, err - } - ct := &TimesStat{ - CPU: "cpu-total", - Idle: float64(c.IdlePct), - User: float64(c.UserPct), - System: float64(c.KernPct), - Iowait: float64(c.WaitPct), - } - ret = append(ret, *ct) - } - return ret, nil -} - func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } - -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - c, err := perfstat.CpuTotalStat() - if err != nil { - return nil, err - } - info := InfoStat{ - CPU: 0, - Mhz: float64(c.ProcessorHz / 1000000), - Cores: int32(c.NCpusCfg), - } - result := []InfoStat{info} - return result, nil -} - -func CountsWithContext(ctx context.Context, logical bool) (int, error) { - c, err := perfstat.CpuTotalStat() - if err != nil { - return 0, err - } - return c.NCpusCfg, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go new file mode 100644 index 0000000..9c1e70b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go @@ -0,0 +1,66 @@ +//go:build aix && cgo +// +build aix,cgo + +package cpu + +import ( + "context" + + "github.com/power-devops/perfstat" +) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat + if percpu { + cpus, err := perfstat.CpuStat() + if err != nil { + return nil, err + } + for _, c := range cpus { + ct := &TimesStat{ + CPU: c.Name, + Idle: float64(c.Idle), + User: float64(c.User), + System: float64(c.Sys), + Iowait: float64(c.Wait), + } + ret = append(ret, *ct) + } + } else { + c, err := perfstat.CpuUtilTotalStat() + if err != nil { + return nil, err + } + ct := &TimesStat{ + CPU: "cpu-total", + Idle: float64(c.IdlePct), + User: float64(c.UserPct), + System: float64(c.KernPct), + Iowait: float64(c.WaitPct), + } + ret = append(ret, *ct) + } + return ret, nil +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + c, err := perfstat.CpuTotalStat() + if err != nil { + return nil, err + } + info := InfoStat{ + CPU: 0, + Mhz: 
float64(c.ProcessorHz / 1000000), + Cores: int32(c.NCpusCfg), + } + result := []InfoStat{info} + return result, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + c, err := perfstat.CpuTotalStat() + if err != nil { + return 0, err + } + return c.NCpusCfg, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go new file mode 100644 index 0000000..d158000 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go @@ -0,0 +1,95 @@ +//go:build aix && !cgo +// +build aix,!cgo + +package cpu + +import ( + "context" + "regexp" + "strings" + "strconv" + + "github.com/shirou/gopsutil/v3/internal/common" +) + +var whiteSpaces = regexp.MustCompile(`\s+`) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + return []TimesStat{}, common.ErrNotImplementedError + } else { + out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + if len(lines) < 5 { + return []TimesStat{}, common.ErrNotImplementedError + } + + ret := TimesStat{CPU: "cpu-total"} + h := whiteSpaces.Split(lines[len(lines)-3], -1) // headers + v := whiteSpaces.Split(lines[len(lines)-2], -1) // values + for i, header := range h { + if t, err := strconv.ParseFloat(v[i], 64); err == nil { + switch header { + case `%usr`: + ret.User = t + case `%sys`: + ret.System = t + case `%wio`: + ret.Iowait = t + case `%idle`: + ret.Idle = t + } + } + } + + return []TimesStat{ret}, nil + } +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + out, err := invoke.CommandWithContext(ctx, "prtconf") + if err != nil { + return nil, err + } + + ret := InfoStat{} + for _, line := range strings.Split(string(out), "\n") { + if strings.HasPrefix(line, "Number Of Processors:") { + p := whiteSpaces.Split(line, 4) + if len(p) > 3 { + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + ret.Cores = int32(t) + } + } + } else if strings.HasPrefix(line, "Processor Clock Speed:") { + p := whiteSpaces.Split(line, 5) + if len(p) > 4 { + if t, err := strconv.ParseFloat(p[3], 64); err == nil { + switch strings.ToUpper(p[4]) { + case "MHZ": + ret.Mhz = t + case "GHZ": + ret.Mhz = t * 1000.0 + case "KHZ": + ret.Mhz = t / 1000.0 + default: + ret.Mhz = t + } + } + } + break + } + } + return []InfoStat{ret}, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + info, err := InfoWithContext(ctx) + if err == nil { + return int(info[0].Cores), nil + } + return 0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go index 4f26230..a60e462 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go @@ -17,6 +17,71 @@ import ( var ClocksPerSec = float64(100) +var armModelToModelName = map[uint64]string{ + 0x810: "ARM810", + 0x920: "ARM920", + 0x922: "ARM922", + 0x926: "ARM926", + 0x940: "ARM940", + 0x946: "ARM946", + 0x966: "ARM966", + 0xa20: "ARM1020", + 0xa22: "ARM1022", + 0xa26: "ARM1026", + 0xb02: "ARM11 MPCore", + 0xb36: "ARM1136", + 0xb56: "ARM1156", + 0xb76: "ARM1176", + 0xc05: "Cortex-A5", + 0xc07: "Cortex-A7", + 0xc08: "Cortex-A8", + 0xc09: "Cortex-A9", + 0xc0d: "Cortex-A17", + 0xc0f: "Cortex-A15", + 0xc0e: "Cortex-A17", + 0xc14: "Cortex-R4", + 0xc15: "Cortex-R5", + 0xc17: "Cortex-R7", + 0xc18: "Cortex-R8", + 
0xc20: "Cortex-M0", + 0xc21: "Cortex-M1", + 0xc23: "Cortex-M3", + 0xc24: "Cortex-M4", + 0xc27: "Cortex-M7", + 0xc60: "Cortex-M0+", + 0xd01: "Cortex-A32", + 0xd02: "Cortex-A34", + 0xd03: "Cortex-A53", + 0xd04: "Cortex-A35", + 0xd05: "Cortex-A55", + 0xd06: "Cortex-A65", + 0xd07: "Cortex-A57", + 0xd08: "Cortex-A72", + 0xd09: "Cortex-A73", + 0xd0a: "Cortex-A75", + 0xd0b: "Cortex-A76", + 0xd0c: "Neoverse-N1", + 0xd0d: "Cortex-A77", + 0xd0e: "Cortex-A76AE", + 0xd13: "Cortex-R52", + 0xd20: "Cortex-M23", + 0xd21: "Cortex-M33", + 0xd40: "Neoverse-V1", + 0xd41: "Cortex-A78", + 0xd42: "Cortex-A78AE", + 0xd43: "Cortex-A65AE", + 0xd44: "Cortex-X1", + 0xd46: "Cortex-A510", + 0xd47: "Cortex-A710", + 0xd48: "Cortex-X2", + 0xd49: "Neoverse-N2", + 0xd4a: "Neoverse-E1", + 0xd4b: "Cortex-A78C", + 0xd4c: "Cortex-X1C", + 0xd4d: "Cortex-A715", + 0xd4e: "Cortex-X3", +} + func init() { clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) // ignore errors @@ -177,6 +242,17 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { c.Family = value case "model", "CPU part": c.Model = value + // if CPU is arm based, model name is found via model number. refer to: arch/arm64/kernel/cpuinfo.c + if c.VendorID == "ARM" { + if v, err := strconv.ParseUint(c.Model, 0, 16); err == nil { + modelName, exist := armModelToModelName[v] + if exist { + c.ModelName = modelName + } else { + c.ModelName = "Undefined" + } + } + } case "model name", "cpu": c.ModelName = value if strings.Contains(value, "POWER8") || diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go index 701d45c..fe33290 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go @@ -4,141 +4,99 @@ package cpu import ( - "bytes" "context" - "encoding/binary" "fmt" "runtime" - "strconv" - "strings" - "syscall" + "unsafe" "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) -// sys/sched.h -var ( - CPUser = 0 - cpNice = 1 - cpSys = 2 - cpIntr = 3 - cpIdle = 4 - cpUStates = 5 -) - -// sys/sysctl.h const ( - ctlKern = 1 // "high kernel": proc, limits - ctlHw = 6 // CTL_HW - sMT = 24 // HW_sMT - kernCptime = 40 // KERN_CPTIME - kernCptime2 = 71 // KERN_CPTIME2 + // sys/sched.h + cpuOnline = 0x0001 // CPUSTATS_ONLINE + + // sys/sysctl.h + ctlKern = 1 // "high kernel": proc, limits + ctlHw = 6 // CTL_HW + smt = 24 // HW_SMT + kernCpTime = 40 // KERN_CPTIME + kernCPUStats = 85 // KERN_CPUSTATS ) var ClocksPerSec = float64(128) +type cpuStats struct { + // cs_time[CPUSTATES] + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 + + // cs_flags + Flags uint64 +} + func init() { clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) // ignore errors if err == nil { ClocksPerSec = float64(clkTck) } - - func() { - v, err := unix.Sysctl("kern.osrelease") // can't reuse host.PlatformInformation because of circular import - if err != nil { - return - } - v = strings.ToLower(v) - version, err := strconv.ParseFloat(v, 64) - if err != nil { - return - } - if version >= 6.4 { - cpIntr = 4 - cpIdle = 5 - cpUStates = 6 - } - }() -} - -func smt() (bool, error) { - mib := []int32{ctlHw, sMT} - buf, _, err := common.CallSyscall(mib) - if err != nil { - return false, err - } - - var ret bool - br := bytes.NewReader(buf) - if err := binary.Read(br, binary.LittleEndian, &ret); err != nil { - return false, err - } - - return ret, nil } func Times(percpu bool) 
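On arm64, `/proc/cpuinfo` carries no free-form model name; the kernel exposes a hex `CPU part` instead (see `arch/arm64/kernel/cpuinfo.c`, referenced above), which the new code parses with `strconv.ParseUint(value, 0, 16)` and resolves through the table, falling back to `Undefined`. A self-contained sketch of that lookup, with a trimmed, hypothetical copy of the table:

```go
package main

import (
	"fmt"
	"strconv"
)

// A few entries copied from armModelToModelName above; the real
// table in cpu_linux.go is much larger.
var armPartToName = map[uint64]string{
	0xd03: "Cortex-A53",
	0xd08: "Cortex-A72",
	0xd0c: "Neoverse-N1",
}

// modelName resolves a raw "CPU part" string such as "0xd08".
func modelName(part string) string {
	v, err := strconv.ParseUint(part, 0, 16) // base 0 accepts the 0x prefix
	if err != nil {
		return "Undefined"
	}
	if name, ok := armPartToName[v]; ok {
		return name
	}
	return "Undefined"
}

func main() {
	fmt.Println(modelName("0xd08")) // Cortex-A72
	fmt.Println(modelName("0x999")) // Undefined
}
```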
([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - var ret []TimesStat - - var ncpu int - if percpu { - ncpu, _ = Counts(true) - } else { - ncpu = 1 +func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) { + if !percpu { + mib := []int32{ctlKern, kernCpTime} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + stat := TimesStat{ + CPU: "cpu-total", + User: float64(times.User) / ClocksPerSec, + Nice: float64(times.Nice) / ClocksPerSec, + System: float64(times.Sys) / ClocksPerSec, + Idle: float64(times.Idle) / ClocksPerSec, + Irq: float64(times.Intr) / ClocksPerSec, + } + return []TimesStat{stat}, nil } - smt, err := smt() - if err == syscall.EOPNOTSUPP { - // if hw.smt is not applicable for this platform (e.g. i386), - // pretend it's enabled - smt = true - } else if err != nil { - return nil, err + ncpu, err := unix.SysctlUint32("hw.ncpu") + if err != nil { + return } - for i := 0; i < ncpu; i++ { - j := i - if !smt { - j *= 2 - } - - cpuTimes := make([]int32, cpUStates) - var mib []int32 - if percpu { - mib = []int32{ctlKern, kernCptime2, int32(j)} - } else { - mib = []int32{ctlKern, kernCptime} - } + var i uint32 + for i = 0; i < ncpu; i++ { + mib := []int32{ctlKern, kernCPUStats, int32(i)} buf, _, err := common.CallSyscall(mib) if err != nil { return ret, err } - br := bytes.NewReader(buf) - err = binary.Read(br, binary.LittleEndian, &cpuTimes) - if err != nil { - return ret, err - } - c := TimesStat{ - User: float64(cpuTimes[CPUser]) / ClocksPerSec, - Nice: float64(cpuTimes[cpNice]) / ClocksPerSec, - System: float64(cpuTimes[cpSys]) / ClocksPerSec, - Idle: float64(cpuTimes[cpIdle]) / ClocksPerSec, - Irq: float64(cpuTimes[cpIntr]) / ClocksPerSec, - } - if percpu { - c.CPU = fmt.Sprintf("cpu%d", j) - } else { - c.CPU = "cpu-total" + stats := (*cpuStats)(unsafe.Pointer(&buf[0])) + if (stats.Flags & cpuOnline) == 0 { + continue } - ret = append(ret, c) + ret = append(ret, TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(stats.User) / ClocksPerSec, + Nice: float64(stats.Nice) / ClocksPerSec, + System: float64(stats.Sys) / ClocksPerSec, + Idle: float64(stats.Idle) / ClocksPerSec, + Irq: float64(stats.Intr) / ClocksPerSec, + }) } return ret, nil diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go new file mode 100644 index 0000000..5e87839 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go @@ -0,0 +1,10 @@ +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Spin uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go new file mode 100644 index 0000000..d659058 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go @@ -0,0 +1,10 @@ +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go new file mode 100644 index 0000000..5e87839 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go @@ -0,0 +1,10 @@ +package cpu + +type cpuTimes struct { + User uint32 + Nice 
uint32 + Sys uint32 + Spin uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go new file mode 100644 index 0000000..d659058 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,10 @@ +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go index b5ee70f..f828c84 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "os/exec" "regexp" "runtime" "sort" @@ -38,9 +37,9 @@ func Times(percpu bool) ([]TimesStat, error) { } func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - kstatSys, err := exec.LookPath("kstat") + kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-p", "cpu_stat:*:*:/^idle$|^user$|^kernel$|^iowait$|^swap$/") if err != nil { - return nil, fmt.Errorf("cannot find kstat: %s", err) + return nil, fmt.Errorf("cannot execute kstat: %s", err) } cpu := make(map[float64]float64) idle := make(map[float64]float64) @@ -48,10 +47,6 @@ func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { kern := make(map[float64]float64) iowt := make(map[float64]float64) // swap := make(map[float64]float64) - kstatSysOut, err := invoke.CommandWithContext(ctx, kstatSys, "-p", "cpu_stat:*:*:/^idle$|^user$|^kernel$|^iowait$|^swap$/") - if err != nil { - return nil, fmt.Errorf("cannot execute kstat: %s", err) - } re := regexp.MustCompile(`[:\s]+`) for _, line := range strings.Split(string(kstatSysOut), "\n") { fields := re.Split(line, -1) @@ -122,27 +117,19 @@ func Info() ([]InfoStat, error) { } func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - psrInfo, err := exec.LookPath("psrinfo") - if err != nil { - return nil, fmt.Errorf("cannot find psrinfo: %s", err) - } - psrInfoOut, err := invoke.CommandWithContext(ctx, psrInfo, "-p", "-v") + psrInfoOut, err := invoke.CommandWithContext(ctx, "psrinfo", "-p", "-v") if err != nil { return nil, fmt.Errorf("cannot execute psrinfo: %s", err) } - isaInfo, err := exec.LookPath("isainfo") - if err != nil { - return nil, fmt.Errorf("cannot find isainfo: %s", err) - } - isaInfoOut, err := invoke.CommandWithContext(ctx, isaInfo, "-b", "-v") + procs, err := parseProcessorInfo(string(psrInfoOut)) if err != nil { - return nil, fmt.Errorf("cannot execute isainfo: %s", err) + return nil, fmt.Errorf("error parsing psrinfo output: %s", err) } - procs, err := parseProcessorInfo(string(psrInfoOut)) + isaInfoOut, err := invoke.CommandWithContext(ctx, "isainfo", "-b", "-v") if err != nil { - return nil, fmt.Errorf("error parsing psrinfo output: %s", err) + return nil, fmt.Errorf("cannot execute isainfo: %s", err) } flags, err := parseISAInfo(string(isaInfoOut)) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go index d1a0e4c..e10612f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go @@ -14,8 +14,7 @@ import ( ) var ( - procGetActiveProcessorCount = common.Modkernel32.NewProc("GetActiveProcessorCount") - procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + 
procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") ) type win32_Processor struct { @@ -204,15 +203,12 @@ type systemInfo struct { func CountsWithContext(ctx context.Context, logical bool) (int, error) { if logical { // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 - err := procGetActiveProcessorCount.Find() - if err == nil { // Win7+ - ret, _, _ := procGetActiveProcessorCount.Call(uintptr(0xffff)) // ALL_PROCESSOR_GROUPS is 0xffff according to Rust's winapi lib https://docs.rs/winapi/*/x86_64-pc-windows-msvc/src/winapi/shared/ntdef.rs.html#120 - if ret != 0 { - return int(ret), nil - } + ret := windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS) + if ret != 0 { + return int(ret), nil } var systemInfo systemInfo - _, _, err = procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) + _, _, err := procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) if systemInfo.dwNumberOfProcessors == 0 { return 0, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix.go index e37686b..bc71712 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix.go @@ -5,81 +5,14 @@ package disk import ( "context" - "fmt" - "github.com/power-devops/perfstat" "github.com/shirou/gopsutil/v3/internal/common" ) -var FSType map[int]string - -func init() { - FSType = map[int]string{ - 0: "jfs2", 1: "namefs", 2: "nfs", 3: "jfs", 5: "cdrom", 6: "proc", - 16: "special-fs", 17: "cache-fs", 18: "nfs3", 19: "automount-fs", 20: "pool-fs", 32: "vxfs", - 33: "veritas-fs", 34: "udfs", 35: "nfs4", 36: "nfs4-pseudo", 37: "smbfs", 38: "mcr-pseudofs", - 39: "ahafs", 40: "sterm-nfs", 41: "asmfs", - } -} - -func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { - f, err := perfstat.FileSystemStat() - if err != nil { - return nil, err - } - ret := make([]PartitionStat, len(f)) - - for _, fs := range f { - fstyp, exists := FSType[fs.FSType] - if !exists { - fstyp = "unknown" - } - info := PartitionStat{ - Device: fs.Device, - Mountpoint: fs.MountPoint, - Fstype: fstyp, - } - ret = append(ret, info) - } - - return ret, err -} - func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { return nil, common.ErrNotImplementedError } -func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { - f, err := perfstat.FileSystemStat() - if err != nil { - return nil, err - } - - blocksize := uint64(512) - for _, fs := range f { - if path == fs.MountPoint { - fstyp, exists := FSType[fs.FSType] - if !exists { - fstyp = "unknown" - } - info := UsageStat{ - Path: path, - Fstype: fstyp, - Total: uint64(fs.TotalBlocks) * blocksize, - Free: uint64(fs.FreeBlocks) * blocksize, - Used: uint64(fs.TotalBlocks-fs.FreeBlocks) * blocksize, - InodesTotal: uint64(fs.TotalInodes), - InodesFree: uint64(fs.FreeInodes), - InodesUsed: uint64(fs.TotalInodes - fs.FreeInodes), - } - info.UsedPercent = (float64(info.Used) / float64(info.Total)) * 100.0 - info.InodesUsedPercent = (float64(info.InodesUsed) / float64(info.InodesTotal)) * 100.0 - return &info, nil - } - } - return nil, fmt.Errorf("mountpoint %s not found", path) -} - func SerialNumberWithContext(ctx context.Context, name string) (string, error) { return "", common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_cgo.go 
b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_cgo.go new file mode 100644 index 0000000..aa534df --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_cgo.go @@ -0,0 +1,76 @@ +//go:build aix && cgo +// +build aix,cgo + +package disk + +import ( + "context" + "fmt" + + "github.com/power-devops/perfstat" +) + +var FSType map[int]string + +func init() { + FSType = map[int]string{ + 0: "jfs2", 1: "namefs", 2: "nfs", 3: "jfs", 5: "cdrom", 6: "proc", + 16: "special-fs", 17: "cache-fs", 18: "nfs3", 19: "automount-fs", 20: "pool-fs", 32: "vxfs", + 33: "veritas-fs", 34: "udfs", 35: "nfs4", 36: "nfs4-pseudo", 37: "smbfs", 38: "mcr-pseudofs", + 39: "ahafs", 40: "sterm-nfs", 41: "asmfs", + } +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + f, err := perfstat.FileSystemStat() + if err != nil { + return nil, err + } + ret := make([]PartitionStat, len(f)) + + for _, fs := range f { + fstyp, exists := FSType[fs.FSType] + if !exists { + fstyp = "unknown" + } + info := PartitionStat{ + Device: fs.Device, + Mountpoint: fs.MountPoint, + Fstype: fstyp, + } + ret = append(ret, info) + } + + return ret, err +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + f, err := perfstat.FileSystemStat() + if err != nil { + return nil, err + } + + blocksize := uint64(512) + for _, fs := range f { + if path == fs.MountPoint { + fstyp, exists := FSType[fs.FSType] + if !exists { + fstyp = "unknown" + } + info := UsageStat{ + Path: path, + Fstype: fstyp, + Total: uint64(fs.TotalBlocks) * blocksize, + Free: uint64(fs.FreeBlocks) * blocksize, + Used: uint64(fs.TotalBlocks-fs.FreeBlocks) * blocksize, + InodesTotal: uint64(fs.TotalInodes), + InodesFree: uint64(fs.FreeInodes), + InodesUsed: uint64(fs.TotalInodes - fs.FreeInodes), + } + info.UsedPercent = (float64(info.Used) / float64(info.Total)) * 100.0 + info.InodesUsedPercent = (float64(info.InodesUsed) / float64(info.InodesTotal)) * 100.0 + return &info, nil + } + } + return nil, fmt.Errorf("mountpoint %s not found", path) +} diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go new file mode 100644 index 0000000..190a2d9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go @@ -0,0 +1,82 @@ +//go:build aix && !cgo +// +build aix,!cgo + +package disk + +import ( + "context" + "regexp" + "strings" + + "golang.org/x/sys/unix" + "github.com/shirou/gopsutil/v3/internal/common" +) + +var whiteSpaces = regexp.MustCompile(`\s+`) +var startBlank = regexp.MustCompile(`^\s+`) + +var ignoreFSType = map[string]bool{"procfs": true} +var FSType = map[int]string{ + 0: "jfs2", 1: "namefs", 2: "nfs", 3: "jfs", 5: "cdrom", 6: "proc", + 16: "special-fs", 17: "cache-fs", 18: "nfs3", 19: "automount-fs", 20: "pool-fs", 32: "vxfs", + 33: "veritas-fs", 34: "udfs", 35: "nfs4", 36: "nfs4-pseudo", 37: "smbfs", 38: "mcr-pseudofs", + 39: "ahafs", 40: "sterm-nfs", 41: "asmfs", + } + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + + out, err := invoke.CommandWithContext(ctx, "mount") + if err != nil { + return nil, err + } + + // parse head lines for column names + colidx := make(map[string]int) + lines := strings.Split(string(out), "\n") + if len(lines) < 3 { + return nil, common.ErrNotImplementedError + } + + idx := 0 + start := 0 + finished := false + for pos, ch := range lines[1] { + if ch == ' ' && ! 
finished { + name := strings.TrimSpace(lines[0][start:pos]) + colidx[name] = idx + finished = true + } else if ch == '-' && finished { + idx++ + start = pos + finished = false + } + } + name := strings.TrimSpace(lines[0][start:len(lines[1])]) + colidx[name] = idx + + for idx := 2; idx < len(lines); idx++ { + line := lines[idx] + if startBlank.MatchString(line) { + line = "localhost" + line + } + p := whiteSpaces.Split(lines[idx], 6) + if len(p) < 5 || ignoreFSType[p[colidx["vfs"]]] { + continue + } + d := PartitionStat{ + Device: p[colidx["mounted"]], + Mountpoint: p[colidx["mounted over"]], + Fstype: p[colidx["vfs"]], + Opts: strings.Split(p[colidx["options"]], ","), + } + + ret = append(ret, d) + } + + return ret, nil +} + +func getFsType(stat unix.Statfs_t) string { + return FSType[int(stat.Vfstype)] +} diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go index 0877b76..933cb04 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin.go @@ -20,9 +20,15 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro return ret, err } fs := make([]unix.Statfs_t, count) - if _, err = unix.Getfsstat(fs, unix.MNT_WAIT); err != nil { + count, err = unix.Getfsstat(fs, unix.MNT_WAIT) + if err != nil { return ret, err } + // On 10.14, and possibly other OS versions, the actual count may + // be less than from the first call. Truncate to the returned count + // to prevent accessing uninitialized entries. + // https://github.com/shirou/gopsutil/issues/1390 + fs = fs[:count] for _, stat := range fs { opts := []string{"rw"} if stat.Flags&unix.MNT_RDONLY != 0 { diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go index b041c8d..27c24c9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_cgo.go @@ -1,5 +1,5 @@ -//go:build darwin && cgo -// +build darwin,cgo +//go:build darwin && cgo && !ios +// +build darwin,cgo,!ios package disk diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go index 99bb8ba..1f099b7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_darwin_nocgo.go @@ -1,5 +1,5 @@ -//go:build darwin && !cgo -// +build darwin,!cgo +//go:build (darwin && !cgo) || ios +// +build darwin,!cgo ios package disk diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go index 8f4ede5..753ce9a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go @@ -9,7 +9,6 @@ import ( "context" "encoding/binary" "fmt" - "os/exec" "strconv" "strings" @@ -137,6 +136,7 @@ func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOC IoTime: uint64(d.Busy_time.Compute() * 1000), Name: name, } + ds.SerialNumber, _ = SerialNumberWithContext(ctx, name) ret[name] = ds } @@ -167,11 +167,7 @@ func getFsType(stat unix.Statfs_t) string { } func SerialNumberWithContext(ctx context.Context, name string) (string, error) { - geom, err := exec.LookPath("geom") - if err != nil { - return "", fmt.Errorf("find geom: %w", err) - } - geomOut, err := invoke.CommandWithContext(ctx, 
geom, "disk", "list", name) + geomOut, err := invoke.CommandWithContext(ctx, "geom", "disk", "list", name) if err != nil { return "", fmt.Errorf("exec geom: %w", err) } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go index 5a0cf46..3911af9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go @@ -11,6 +11,7 @@ import ( "fmt" "io/ioutil" "os" + "path" "path/filepath" "strconv" "strings" @@ -100,8 +101,12 @@ const ( AFS_SUPER_MAGIC = 0x5346414F AUFS_SUPER_MAGIC = 0x61756673 ANON_INODE_FS_SUPER_MAGIC = 0x09041934 + BPF_FS_MAGIC = 0xCAFE4A11 CEPH_SUPER_MAGIC = 0x00C36400 + CGROUP2_SUPER_MAGIC = 0x63677270 + CONFIGFS_MAGIC = 0x62656570 ECRYPTFS_SUPER_MAGIC = 0xF15F + F2FS_SUPER_MAGIC = 0xF2F52010 FAT_SUPER_MAGIC = 0x4006 FHGFS_SUPER_MAGIC = 0x19830326 FUSEBLK_SUPER_MAGIC = 0x65735546 @@ -116,9 +121,11 @@ const ( KAFS_SUPER_MAGIC = 0x6B414653 LUSTRE_SUPER_MAGIC = 0x0BD00BD0 NFSD_SUPER_MAGIC = 0x6E667364 + NSFS_MAGIC = 0x6E736673 PANFS_SUPER_MAGIC = 0xAAD7AAEA RPC_PIPEFS_SUPER_MAGIC = 0x67596969 SECURITYFS_SUPER_MAGIC = 0x73636673 + TRACEFS_MAGIC = 0x74726163 UFS_BYTESWAPPED_SUPER_MAGIC = 0x54190100 VMHGFS_SUPER_MAGIC = 0xBACBACBC VZFS_SUPER_MAGIC = 0x565A4653 @@ -137,21 +144,26 @@ var fsTypeMap = map[int64]string{ BDEVFS_MAGIC: "bdevfs", /* 0x62646576 local */ BFS_MAGIC: "bfs", /* 0x1BADFACE local */ BINFMTFS_MAGIC: "binfmt_misc", /* 0x42494E4D local */ + BPF_FS_MAGIC: "bpf", /* 0xCAFE4A11 local */ BTRFS_SUPER_MAGIC: "btrfs", /* 0x9123683E local */ CEPH_SUPER_MAGIC: "ceph", /* 0x00C36400 remote */ CGROUP_SUPER_MAGIC: "cgroupfs", /* 0x0027E0EB local */ + CGROUP2_SUPER_MAGIC: "cgroup2fs", /* 0x63677270 local */ CIFS_MAGIC_NUMBER: "cifs", /* 0xFF534D42 remote */ CODA_SUPER_MAGIC: "coda", /* 0x73757245 remote */ COH_SUPER_MAGIC: "coh", /* 0x012FF7B7 local */ + CONFIGFS_MAGIC: "configfs", /* 0x62656570 local */ CRAMFS_MAGIC: "cramfs", /* 0x28CD3D45 local */ DEBUGFS_MAGIC: "debugfs", /* 0x64626720 local */ DEVFS_SUPER_MAGIC: "devfs", /* 0x1373 local */ DEVPTS_SUPER_MAGIC: "devpts", /* 0x1CD1 local */ ECRYPTFS_SUPER_MAGIC: "ecryptfs", /* 0xF15F local */ + EFIVARFS_MAGIC: "efivarfs", /* 0xDE5E81E4 local */ EFS_SUPER_MAGIC: "efs", /* 0x00414A53 local */ EXT_SUPER_MAGIC: "ext", /* 0x137D local */ EXT2_SUPER_MAGIC: "ext2/ext3", /* 0xEF53 local */ EXT2_OLD_SUPER_MAGIC: "ext2", /* 0xEF51 local */ + F2FS_SUPER_MAGIC: "f2fs", /* 0xF2F52010 local */ FAT_SUPER_MAGIC: "fat", /* 0x4006 local */ FHGFS_SUPER_MAGIC: "fhgfs", /* 0x19830326 remote */ FUSEBLK_SUPER_MAGIC: "fuseblk", /* 0x65735546 remote */ @@ -184,6 +196,7 @@ var fsTypeMap = map[int64]string{ NFS_SUPER_MAGIC: "nfs", /* 0x6969 remote */ NFSD_SUPER_MAGIC: "nfsd", /* 0x6E667364 remote */ NILFS_SUPER_MAGIC: "nilfs", /* 0x3434 local */ + NSFS_MAGIC: "nsfs", /* 0x6E736673 local */ NTFS_SB_MAGIC: "ntfs", /* 0x5346544E local */ OPENPROM_SUPER_MAGIC: "openprom", /* 0x9FA1 local */ OCFS2_SUPER_MAGIC: "ocfs2", /* 0x7461636f remote */ @@ -206,6 +219,7 @@ var fsTypeMap = map[int64]string{ SYSV2_SUPER_MAGIC: "sysv2", /* 0x012FF7B6 local */ SYSV4_SUPER_MAGIC: "sysv4", /* 0x012FF7B5 local */ TMPFS_MAGIC: "tmpfs", /* 0x01021994 local */ + TRACEFS_MAGIC: "tracefs", /* 0x74726163 local */ UDF_SUPER_MAGIC: "udf", /* 0x15013346 local */ UFS_MAGIC: "ufs", /* 0x00011954 local */ UFS_BYTESWAPPED_SUPER_MAGIC: "ufs", /* 0x54190100 local */ @@ -221,20 +235,45 @@ var fsTypeMap = map[int64]string{ ZFS_SUPER_MAGIC: "zfs", /* 
0x2FC12FC1 local */ } -func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { - useMounts := false - - filename := common.HostProc("1/mountinfo") - lines, err := common.ReadLines(filename) +// readMountFile reads mountinfo or mounts file under the specified root path +// (eg, /proc/1, /proc/self, etc) +func readMountFile(root string) (lines []string, useMounts bool, filename string, err error) { + filename = path.Join(root, "mountinfo") + lines, err = common.ReadLines(filename) if err != nil { var pathErr *os.PathError if !errors.As(err, &pathErr) { - return nil, err + return } // if kernel does not support 1/mountinfo, fallback to 1/mounts (<2.6.26) useMounts = true - filename = common.HostProc("1/mounts") + filename = path.Join(root, "mounts") lines, err = common.ReadLines(filename) + if err != nil { + return + } + return + } + return +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + // by default, try "/proc/1/..." first + root := common.HostProc(path.Join("1")) + + // force preference for dirname of HOST_PROC_MOUNTINFO, if set #1271 + hpmPath := os.Getenv("HOST_PROC_MOUNTINFO") + if hpmPath != "" { + root = filepath.Dir(hpmPath) + } + + lines, useMounts, filename, err := readMountFile(root) + if err != nil { + if hpmPath != "" { // don't fallback with HOST_PROC_MOUNTINFO + return nil, err + } + // fallback to "/proc/self/..." #1159 + lines, useMounts, filename, err = readMountFile(common.HostProc(path.Join("self"))) if err != nil { return nil, err } @@ -312,10 +351,9 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro // so we get the real device name from its major/minor number if d.Device == "/dev/root" { devpath, err := os.Readlink(common.HostSys("/dev/block/" + blockDeviceID)) - if err != nil { - return nil, err + if err == nil { + d.Device = strings.Replace(d.Device, "root", filepath.Base(devpath), 1) } - d.Device = strings.Replace(d.Device, "root", filepath.Base(devpath), 1) } } ret = append(ret, d) diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd.go index e065865..81ff239 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd.go @@ -57,9 +57,9 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro } d := PartitionStat{ - Device: common.IntToString(stat.F_mntfromname[:]), - Mountpoint: common.IntToString(stat.F_mntonname[:]), - Fstype: common.IntToString(stat.F_fstypename[:]), + Device: common.ByteToString(stat.F_mntfromname[:]), + Mountpoint: common.ByteToString(stat.F_mntonname[:]), + Fstype: common.ByteToString(stat.F_fstypename[:]), Opts: opts, } @@ -147,7 +147,7 @@ func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { } func getFsType(stat unix.Statfs_t) string { - return common.IntToString(stat.F_fstypename[:]) + return common.ByteToString(stat.F_fstypename[:]) } func SerialNumberWithContext(ctx context.Context, name string) (string, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm.go new file mode 100644 index 0000000..86054a6 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_arm.go @@ -0,0 +1,38 @@ +//go:build openbsd && arm +// +build openbsd,arm + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
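// --- standalone sketch (not part of the vendored patch) ---
// The readMountFile change above gives Linux partition listing a fixed
// lookup order. A minimal sketch of just the candidate resolution,
// assuming the same HOST_PROC / HOST_PROC_MOUNTINFO environment
// conventions (the real code reads each file as it goes and falls back
// from mountinfo to mounts only on an *os.PathError):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func mountFileCandidates() []string {
	// HOST_PROC_MOUNTINFO wins and disables the /proc/self fallback (#1271)
	if hpm := os.Getenv("HOST_PROC_MOUNTINFO"); hpm != "" {
		root := filepath.Dir(hpm)
		return []string{filepath.Join(root, "mountinfo"), filepath.Join(root, "mounts")}
	}
	proc := os.Getenv("HOST_PROC")
	if proc == "" {
		proc = "/proc"
	}
	return []string{
		filepath.Join(proc, "1", "mountinfo"),
		filepath.Join(proc, "1", "mounts"), // kernels < 2.6.26
		filepath.Join(proc, "self", "mountinfo"), // fallback when /proc/1 is unreadable (#1159)
		filepath.Join(proc, "self", "mounts"),
	}
}

func main() { fmt.Println(mountFileCandidates()) }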
+// cgo -godefs disk/types_openbsd.go + +package disk + +const ( + devstat_NO_DATA = 0x00 + devstat_READ = 0x01 + devstat_WRITE = 0x02 + devstat_FREE = 0x03 +) + +const ( + sizeOfDiskstats = 0x60 +) + +type Diskstats struct { + Name [16]int8 + Busy int32 + Rxfer uint64 + Wxfer uint64 + Seek uint64 + Rbytes uint64 + Wbytes uint64 + Attachtime Timeval + Timestamp Timeval + Time Timeval +} +type Timeval struct { + Sec int64 + Usec int32 +} + +type Diskstat struct{} +type bintime struct{} diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go index 126a9e1..934d651 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go @@ -5,10 +5,15 @@ package disk import ( "bufio" + "bytes" "context" "fmt" "math" "os" + "path/filepath" + "regexp" + "runtime" + "strconv" "strings" "github.com/shirou/gopsutil/v3/internal/common" @@ -72,20 +77,129 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro }) } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("unable to scan %q: %v", _MNTTAB, err) + return nil, fmt.Errorf("unable to scan %q: %w", _MNTTAB, err) } return ret, err } func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { - return nil, common.ErrNotImplementedError + var issolaris bool + if runtime.GOOS == "illumos" { + issolaris = false + } else { + issolaris = true + } + // check disks instead of zfs pools + filterstr := "/[^zfs]/:::/^nread$|^nwritten$|^reads$|^writes$|^rtime$|^wtime$/" + kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-c", "disk", "-p", filterstr) + if err != nil { + return nil, fmt.Errorf("cannot execute kstat: %w", err) + } + lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") + if len(lines) == 0 { + return nil, fmt.Errorf("no disk class found") + } + dnamearr := make(map[string]string) + nreadarr := make(map[string]uint64) + nwrittenarr := make(map[string]uint64) + readsarr := make(map[string]uint64) + writesarr := make(map[string]uint64) + rtimearr := make(map[string]uint64) + wtimearr := make(map[string]uint64) + re := regexp.MustCompile(`[:\s]+`) + + // in case the name is "/dev/sda1", then convert to "sda1" + for i, name := range names { + names[i] = filepath.Base(name) + } + + for _, line := range lines { + fields := re.Split(line, -1) + if len(fields) == 0 { + continue + } + moduleName := fields[0] + instance := fields[1] + dname := fields[2] + + if len(names) > 0 && !common.StringsHas(names, dname) { + continue + } + dnamearr[moduleName+instance] = dname + // fields[3] is the statistic label, fields[4] is the value + switch fields[3] { + case "nread": + nreadarr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return nil, err + } + case "nwritten": + nwrittenarr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return nil, err + } + case "reads": + readsarr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return nil, err + } + case "writes": + writesarr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return nil, err + } + case "rtime": + if issolaris { + // from sec to milli secs + var frtime float64 + frtime, err = strconv.ParseFloat((fields[4]), 64) + rtimearr[moduleName+instance] = uint64(frtime * 1000) + } else { + // from nano to milli secs + 
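// --- standalone sketch (not part of the vendored patch) ---
// kstat reports rtime/wtime as fractional seconds on Solaris but as
// nanoseconds on illumos; both branches above normalize the value to
// milliseconds. A minimal self-contained version of that conversion:

package main

import (
	"fmt"
	"strconv"
)

func kstatTimeToMs(field string, illumos bool) (uint64, error) {
	if illumos {
		ns, err := strconv.ParseUint(field, 10, 64)
		if err != nil {
			return 0, err
		}
		return ns / 1_000_000, nil // nanoseconds -> milliseconds
	}
	sec, err := strconv.ParseFloat(field, 64)
	if err != nil {
		return 0, err
	}
	return uint64(sec * 1000), nil // seconds -> milliseconds
}

func main() {
	ms, _ := kstatTimeToMs("2.5", false) // Solaris-style value
	fmt.Println(ms)                      // 2500
}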
rtimearr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + rtimearr[moduleName+instance] = rtimearr[moduleName+instance] / 1000 / 1000 + } + if err != nil { + return nil, err + } + case "wtime": + if issolaris { + // from sec to milli secs + var fwtime float64 + fwtime, err = strconv.ParseFloat((fields[4]), 64) + wtimearr[moduleName+instance] = uint64(fwtime * 1000) + } else { + // from nano to milli secs + wtimearr[moduleName+instance], err = strconv.ParseUint((fields[4]), 10, 64) + wtimearr[moduleName+instance] = wtimearr[moduleName+instance] / 1000 / 1000 + } + if err != nil { + return nil, err + } + } + } + + ret := make(map[string]IOCountersStat, 0) + for k := range dnamearr { + d := IOCountersStat{ + Name: dnamearr[k], + ReadBytes: nreadarr[k], + WriteBytes: nwrittenarr[k], + ReadCount: readsarr[k], + WriteCount: writesarr[k], + ReadTime: rtimearr[k], + WriteTime: wtimearr[k], + } + ret[d.Name] = d + } + return ret, nil } func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { statvfs := unix.Statvfs_t{} if err := unix.Statvfs(path, &statvfs); err != nil { - return nil, fmt.Errorf("unable to call statvfs(2) on %q: %v", path, err) + return nil, fmt.Errorf("unable to call statvfs(2) on %q: %w", path, err) } usageStat := &UsageStat{ @@ -114,7 +228,31 @@ func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { } func SerialNumberWithContext(ctx context.Context, name string) (string, error) { - return "", common.ErrNotImplementedError + out, err := invoke.CommandWithContext(ctx, "cfgadm", "-ls", "select=type(disk),cols=ap_id:info,cols2=,noheadings") + if err != nil { + return "", fmt.Errorf("exec cfgadm: %w", err) + } + + suf := "::" + strings.TrimPrefix(name, "/dev/") + s := bufio.NewScanner(bytes.NewReader(out)) + for s.Scan() { + flds := strings.Fields(s.Text()) + if strings.HasSuffix(flds[0], suf) { + flen := len(flds) + if flen >= 3 { + for i, f := range flds { + if i > 0 && i < flen-1 && f == "SN:" { + return flds[i+1], nil + } + } + } + return "", nil + } + } + if err := s.Err(); err != nil { + return "", err + } + return "", nil } func LabelWithContext(ctx context.Context, name string) (string, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go index bdb62b2..1e73524 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_unix.go @@ -1,5 +1,5 @@ -//go:build freebsd || linux || darwin -// +build freebsd linux darwin +//go:build freebsd || linux || darwin || (aix && !cgo) +// +build freebsd linux darwin aix,!cgo package disk @@ -27,13 +27,22 @@ func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { InodesFree: (uint64(stat.Ffree)), } + ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize) + + if (ret.Used + ret.Free) == 0 { + ret.UsedPercent = 0 + } else { + // We don't use ret.Total to calculate percent. 
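// --- standalone sketch (not part of the vendored patch) ---
// Why divide by Used+Free rather than Total: with statfs(2), Used is
// Blocks-Bfree while the reported Free is typically derived from Bavail
// (the blocks an unprivileged user may use), so root-reserved blocks
// make Used+Free smaller than Total on e.g. ext4. Dividing by Used+Free
// matches what df(1) prints (gopsutil issue #562). Worked example with
// hypothetical numbers:

package main

import "fmt"

func usedPercent(blocks, bfree, bavail, bsize uint64) float64 {
	used := (blocks - bfree) * bsize
	free := bavail * bsize
	if used+free == 0 {
		return 0
	}
	return float64(used) / float64(used+free) * 100.0
}

func main() {
	// 1000 blocks, 200 unused, 150 available: 50 are reserved for root.
	fmt.Printf("%.1f%%\n", usedPercent(1000, 200, 150, 4096)) // 84.2%
}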
+ // see https://github.com/shirou/gopsutil/issues/562 + ret.UsedPercent = (float64(ret.Used) / float64(ret.Used+ret.Free)) * 100.0 + } + // if could not get InodesTotal, return empty if ret.InodesTotal < ret.InodesFree { return ret, nil } ret.InodesUsed = (ret.InodesTotal - ret.InodesFree) - ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize) if ret.InodesTotal == 0 { ret.InodesUsedPercent = 0 @@ -41,14 +50,6 @@ func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0 } - if (ret.Used + ret.Free) == 0 { - ret.UsedPercent = 0 - } else { - // We don't use ret.Total to calculate percent. - // see https://github.com/shirou/gopsutil/issues/562 - ret.UsedPercent = (float64(ret.Used) / float64(ret.Used+ret.Free)) * 100.0 - } - return ret, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go index 9106364..6aa47a7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go @@ -12,8 +12,11 @@ import ( "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" ) +type Warnings = common.Warnings + var ( procGetDiskFreeSpaceExW = common.Modkernel32.NewProc("GetDiskFreeSpaceExW") procGetLogicalDriveStringsW = common.Modkernel32.NewProc("GetLogicalDriveStringsW") @@ -44,6 +47,14 @@ type diskPerformance struct { alignmentPadding uint32 // necessary for 32bit support, see https://github.com/elastic/beats/pull/16553 } +func init() { + // enable disk performance counters on Windows Server editions (needs to run as admin) + key, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Services\PartMgr`, registry.SET_VALUE) + if err == nil { + key.SetDWordValue("EnableCounterForIoctl", 1) + } +} + func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { lpFreeBytesAvailable := int64(0) lpTotalNumberOfBytes := int64(0) @@ -71,6 +82,9 @@ func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { } func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + warnings := Warnings{ + Verbose: true, + } var ret []PartitionStat lpBuffer := make([]byte, 254) diskret, _, err := procGetLogicalDriveStringsW.Call( @@ -85,7 +99,9 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro typepath, _ := windows.UTF16PtrFromString(path) typeret, _, _ := procGetDriveType.Call(uintptr(unsafe.Pointer(typepath))) if typeret == 0 { - return ret, windows.GetLastError() + err := windows.GetLastError() + warnings.Add(err) + continue } // 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 4: DRIVE_REMOTE 5: DRIVE_CDROM @@ -109,7 +125,8 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro if typeret == 5 || typeret == 2 { continue // device is not ready will happen if there is no disk in the drive } - return ret, err + warnings.Add(err) + continue } opts := []string{"rw"} if lpFileSystemFlags&fileReadOnlyVolume != 0 { @@ -129,7 +146,7 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro } } } - return ret, nil + return ret, warnings.Reference() } func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.c 
b/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.c index 5667bf9..8aab04f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.c +++ b/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.c @@ -18,17 +18,15 @@ static int fillstat(io_registry_entry_t d, DriveStats *stat); int gopsutil_v3_readdrivestat(DriveStats a[], int n) { - mach_port_t port; CFMutableDictionaryRef match; io_iterator_t drives; io_registry_entry_t d; kern_return_t status; int na, rv; - IOMainPort(bootstrap_port, &port); match = IOServiceMatching("IOMedia"); CFDictionaryAddValue(match, CFSTR(kIOMediaWholeKey), kCFBooleanTrue); - status = IOServiceGetMatchingServices(port, match, &drives); + status = IOServiceGetMatchingServices(0, match, &drives); if(status != KERN_SUCCESS) return -1; diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.h b/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.h index d004ac9..cb9ec7a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.h +++ b/vendor/github.com/shirou/gopsutil/v3/disk/iostat_darwin.h @@ -30,7 +30,3 @@ struct CPUStats { }; extern int gopsutil_v3_readdrivestat(DriveStats a[], int n); - -#if (MAC_OS_X_VERSION_MIN_REQUIRED < 120000) // Before macOS 12 Monterey - #define IOMainPort IOMasterPort -#endif diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go index 575a08b..2f20fc6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go @@ -10,7 +10,6 @@ import ( "errors" "io/ioutil" "os" - "os/exec" "strings" "unsafe" @@ -23,12 +22,7 @@ import ( const user_PROCESS = 7 func HostIDWithContext(ctx context.Context) (string, error) { - ioreg, err := exec.LookPath("ioreg") - if err != nil { - return "", err - } - - out, err := invoke.CommandWithContext(ctx, ioreg, "-rd1", "-c", "IOPlatformExpertDevice") + out, err := invoke.CommandWithContext(ctx, "ioreg", "-rd1", "-c", "IOPlatformExpertDevice") if err != nil { return "", err } @@ -102,17 +96,12 @@ func PlatformInformationWithContext(ctx context.Context) (string, string, string family := "" pver := "" - sw_vers, err := exec.LookPath("sw_vers") - if err != nil { - return "", "", "", err - } - p, err := unix.Sysctl("kern.ostype") if err == nil { platform = strings.ToLower(p) } - out, err := invoke.CommandWithContext(ctx, sw_vers, "-productVersion") + out, err := invoke.CommandWithContext(ctx, "sw_vers", "-productVersion") if err == nil { pver = strings.ToLower(strings.TrimSpace(string(out))) } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_386.go b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_386.go deleted file mode 100644 index 8caeed2..0000000 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_386.go +++ /dev/null @@ -1,20 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_darwin.go - -package host - -type Utmpx struct { - User [256]int8 - ID [4]int8 - Line [32]int8 - Pid int32 - Type int16 - Pad_cgo_0 [6]byte - Tv Timeval - Host [256]int8 - Pad [16]uint32 -} - -type Timeval struct { - Sec int32 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go index def9a67..5c07c69 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go @@ -10,7 +10,6 @@ import ( "fmt" "io/ioutil" "os" - "os/exec" "path/filepath" "regexp" 
"strconv" @@ -20,6 +19,8 @@ import ( "golang.org/x/sys/unix" ) +type Warnings = common.Warnings + type lsbStruct struct { ID string Release string @@ -148,11 +149,7 @@ func getlsbStruct() (*lsbStruct, error) { } } } else if common.PathExists("/usr/bin/lsb_release") { - lsb_release, err := exec.LookPath("lsb_release") - if err != nil { - return ret, err - } - out, err := invoke.Command(lsb_release) + out, err := invoke.Command("/usr/bin/lsb_release") if err != nil { return ret, err } @@ -184,26 +181,26 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil lsb = &lsbStruct{} } - if common.PathExists(common.HostEtc("oracle-release")) { + if common.PathExistsWithContents(common.HostEtc("oracle-release")) { platform = "oracle" contents, err := common.ReadLines(common.HostEtc("oracle-release")) if err == nil { version = getRedhatishVersion(contents) } - } else if common.PathExists(common.HostEtc("enterprise-release")) { + } else if common.PathExistsWithContents(common.HostEtc("enterprise-release")) { platform = "oracle" contents, err := common.ReadLines(common.HostEtc("enterprise-release")) if err == nil { version = getRedhatishVersion(contents) } - } else if common.PathExists(common.HostEtc("slackware-version")) { + } else if common.PathExistsWithContents(common.HostEtc("slackware-version")) { platform = "slackware" contents, err := common.ReadLines(common.HostEtc("slackware-version")) if err == nil { version = getSlackwareVersion(contents) } - } else if common.PathExists(common.HostEtc("debian_version")) { + } else if common.PathExistsWithContents(common.HostEtc("debian_version")) { if lsb.ID == "Ubuntu" { platform = "ubuntu" version = lsb.Release @@ -211,7 +208,7 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil platform = "linuxmint" version = lsb.Release } else { - if common.PathExists("/usr/bin/raspi-config") { + if common.PathExistsWithContents("/usr/bin/raspi-config") { platform = "raspbian" } else { platform = "debian" @@ -221,6 +218,12 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil version = contents[0] } } + } else if common.PathExists(common.HostEtc("neokylin-release")) { + contents, err := common.ReadLines(common.HostEtc("neokylin-release")) + if err == nil { + version = getRedhatishVersion(contents) + platform = getRedhatishPlatform(contents) + } } else if common.PathExists(common.HostEtc("redhat-release")) { contents, err := common.ReadLines(common.HostEtc("redhat-release")) if err == nil { @@ -278,6 +281,8 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil version = lsb.Release } + platform = strings.Trim(platform, `"`) + switch platform { case "debian", "ubuntu", "linuxmint", "raspbian": family = "debian" @@ -301,6 +306,8 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil family = "coreos" case "solus": family = "solus" + case "neokylin": + family = "neokylin" } return platform, family, version, nil @@ -312,7 +319,7 @@ func KernelVersionWithContext(ctx context.Context) (version string, err error) { if err != nil { return "", err } - return string(utsname.Release[:bytes.IndexByte(utsname.Release[:], 0)]), nil + return unix.ByteSliceToString(utsname.Release[:]), nil } func getSlackwareVersion(contents []string) string { @@ -327,7 +334,7 @@ func getRedhatishVersion(contents []string) string { if strings.Contains(c, "rawhide") { return "rawhide" } - if matches := regexp.MustCompile(`release 
(\d[\d.]*)`).FindStringSubmatch(c); matches != nil { + if matches := regexp.MustCompile(`release (\w[\d.]*)`).FindStringSubmatch(c); matches != nil { return matches[1] } return "" diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go new file mode 100644 index 0000000..f0ac57d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go @@ -0,0 +1,34 @@ +//go:build openbsd && arm +// +build openbsd,arm + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs host/types_openbsd.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x130 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [32]int8 + Host [256]int8 + Time int64 +} +type Timeval struct { + Sec int64 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go b/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go index 89e6378..24529f1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go @@ -3,14 +3,13 @@ package host -import ( - "bytes" - - "golang.org/x/sys/unix" -) +import "golang.org/x/sys/unix" func KernelArch() (string, error) { var utsname unix.Utsname err := unix.Uname(&utsname) - return string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), err + if err != nil { + return "", err + } + return unix.ByteSliceToString(utsname.Machine[:]), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go b/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go index 62d4786..7d3625a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go @@ -4,10 +4,11 @@ import ( "bufio" "bytes" "context" + "encoding/csv" "fmt" + "io" "io/ioutil" "os" - "os/exec" "regexp" "strconv" "strings" @@ -23,23 +24,20 @@ func HostIDWithContext(ctx context.Context) (string, error) { if platform == "SmartOS" { // If everything works, use the current zone ID as the HostID if present. - zonename, err := exec.LookPath("zonename") + out, err := invoke.CommandWithContext(ctx, "zonename") if err == nil { - out, err := invoke.CommandWithContext(ctx, zonename) - if err == nil { - sc := bufio.NewScanner(bytes.NewReader(out)) - for sc.Scan() { - line := sc.Text() - - // If we're in the global zone, rely on the hostname. - if line == "global" { - hostname, err := os.Hostname() - if err == nil { - return hostname, nil - } - } else { - return strings.TrimSpace(line), nil + sc := bufio.NewScanner(bytes.NewReader(out)) + for sc.Scan() { + line := sc.Text() + + // If we're in the global zone, rely on the hostname. + if line == "global" { + hostname, err := os.Hostname() + if err == nil { + return hostname, nil } + } else { + return strings.TrimSpace(line), nil } } } @@ -48,15 +46,12 @@ func HostIDWithContext(ctx context.Context) (string, error) { // If HostID is still unknown, use hostid(1), which can lie to callers but at // this point there are no hardware facilities available. This behavior // matches that of other supported OSes. 
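// --- standalone sketch (not part of the vendored patch) ---
// The HostID fallback chain being simplified here: zonename(1) first,
// the hostname when inside the global zone, then hostid(1), which the
// comment above notes can lie to callers. A condensed rendition using
// os/exec directly (the real code gates the zonename step on the
// SmartOS platform and goes through the package's invoke wrapper):

package main

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func hostID(ctx context.Context) string {
	if out, err := exec.CommandContext(ctx, "zonename").Output(); err == nil {
		zone := strings.TrimSpace(string(out))
		if zone != "" && zone != "global" {
			return zone // non-global zone: the zone ID is the host ID
		}
		if host, err := os.Hostname(); err == nil {
			return host // global zone: rely on the hostname
		}
	}
	if out, err := exec.CommandContext(ctx, "hostid").Output(); err == nil {
		return strings.TrimSpace(string(out)) // last resort
	}
	return ""
}

func main() { fmt.Println(hostID(context.Background())) }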
- hostID, err := exec.LookPath("hostid") + out, err := invoke.CommandWithContext(ctx, "hostid") if err == nil { - out, err := invoke.CommandWithContext(ctx, hostID) - if err == nil { - sc := bufio.NewScanner(bytes.NewReader(out)) - for sc.Scan() { - line := sc.Text() - return strings.TrimSpace(line), nil - } + sc := bufio.NewScanner(bytes.NewReader(out)) + for sc.Scan() { + line := sc.Text() + return strings.TrimSpace(line), nil } } @@ -75,12 +70,7 @@ func numProcs(ctx context.Context) (uint64, error) { var kstatMatch = regexp.MustCompile(`([^\s]+)[\s]+([^\s]*)`) func BootTimeWithContext(ctx context.Context) (uint64, error) { - kstat, err := exec.LookPath("kstat") - if err != nil { - return 0, err - } - - out, err := invoke.CommandWithContext(ctx, kstat, "-p", "unix:0:system_misc:boot_time") + out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "unix:0:system_misc:boot_time") if err != nil { return 0, err } @@ -106,7 +96,40 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - return []TemperatureStat{}, common.ErrNotImplementedError + var ret []TemperatureStat + + out, err := invoke.CommandWithContext(ctx, "ipmitool", "-c", "sdr", "list") + if err != nil { + return ret, err + } + + r := csv.NewReader(strings.NewReader(string(out))) + // Output may contain errors, e.g. "bmc_send_cmd: Permission denied", don't expect a consistent number of records + r.FieldsPerRecord = -1 + for { + record, err := r.Read() + if err == io.EOF { + break + } + if err != nil { + return ret, err + } + // CPU1 Temp,40,degrees C,ok + if len(record) < 3 || record[1] == "" || record[2] != "degrees C" { + continue + } + v, err := strconv.ParseFloat(record[1], 64) + if err != nil { + return ret, err + } + ts := TemperatureStat{ + SensorKey: strings.TrimSuffix(record[0], " Temp"), + Temperature: v, + } + ret = append(ret, ts) + } + + return ret, nil } func VirtualizationWithContext(ctx context.Context) (string, string, error) { @@ -146,12 +169,7 @@ func parseReleaseFile() (string, error) { // parseUnameOutput returns platformFamily, kernelVersion and platformVersion func parseUnameOutput(ctx context.Context) (string, string, string, error) { - uname, err := exec.LookPath("uname") - if err != nil { - return "", "", "", err - } - - out, err := invoke.CommandWithContext(ctx, uname, "-srv") + out, err := invoke.CommandWithContext(ctx, "uname", "-srv") if err != nil { return "", "", "", err } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go b/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go index fcd1d59..a16892d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go @@ -188,6 +188,14 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil } } + var UBR uint32 // Update Build Revision + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`UBR`), nil, &valType, nil, &bufLen) + if err == nil { + regBuf := make([]byte, 4) + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`UBR`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + copy((*[4]byte)(unsafe.Pointer(&UBR))[:], regBuf) + } + // PlatformFamily switch osInfo.wProductType { case 1: @@ -199,7 +207,9 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil } // Platform Version - version = fmt.Sprintf("%d.%d.%d Build %d", osInfo.dwMajorVersion, osInfo.dwMinorVersion, 
osInfo.dwBuildNumber, osInfo.dwBuildNumber) + version = fmt.Sprintf("%d.%d.%d.%d Build %d.%d", + osInfo.dwMajorVersion, osInfo.dwMinorVersion, osInfo.dwBuildNumber, UBR, + osInfo.dwBuildNumber, UBR) return platform, family, version, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c b/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c index fa54b1d..0197d95 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c +++ b/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c @@ -72,8 +72,7 @@ kern_return_t gopsutil_v3_open_smc(void) { kern_return_t result; io_service_t service; - service = IOServiceGetMatchingService(kIOMainPortDefault, - IOServiceMatching(IOSERVICE_SMC)); + service = IOServiceGetMatchingService(0, IOServiceMatching(IOSERVICE_SMC)); if (service == 0) { // Note: IOServiceMatching documents 0 on failure printf("ERROR: %s NOT FOUND\n", IOSERVICE_SMC); diff --git a/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h b/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h index 8d5ba68..e3013ab 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h +++ b/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h @@ -29,9 +29,4 @@ kern_return_t gopsutil_v3_open_smc(void); kern_return_t gopsutil_v3_close_smc(void); double gopsutil_v3_get_temperature(char *); -#if (MAC_OS_X_VERSION_MIN_REQUIRED < 120000) // Before macOS 12 Monterey - #define kIOMainPortDefault kIOMasterPortDefault -#endif - - #endif // __SMC_H__ diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go index 4f6df9a..323402d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go @@ -114,8 +114,8 @@ func ReadLines(filename string) ([]string, error) { // ReadLinesOffsetN reads contents from file and splits them by new line. // The offset tells at which line number to start. // The count determines the number of lines to read (starting from offset): -// n >= 0: at most n lines -// n < 0: whole file +// n >= 0: at most n lines +// n < 0: whole file func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { f, err := os.Open(filename) if err != nil { @@ -311,6 +311,15 @@ func PathExists(filename string) bool { return false } +// PathExistsWithContents returns the filename exists and it is not empty +func PathExistsWithContents(filename string) bool { + info, err := os.Stat(filename) + if err != nil { + return false + } + return info.Size() > 4 // at least 4 bytes +} + // GetEnv retrieves the environment variable key. If it does not exist it returns the default. func GetEnv(key string, dfault string, combineWith ...string) string { value := os.Getenv(key) @@ -355,14 +364,8 @@ func HostDev(combineWith ...string) string { return GetEnv("HOST_DEV", "/dev", combineWith...) } -// MockEnv set environment variable and return revert function. -// MockEnv should be used testing only. -func MockEnv(key string, value string) func() { - original := os.Getenv(key) - os.Setenv(key, value) - return func() { - os.Setenv(key, original) - } +func HostRoot(combineWith ...string) string { + return GetEnv("HOST_ROOT", "/", combineWith...) 
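// --- standalone sketch (not part of the vendored patch) ---
// HostRoot, added here, follows the same pattern as HostProc/HostEtc:
// an environment override with a default, joined with optional path
// components. The Docker detection added to common_linux.go below uses
// it to probe HostRoot(".dockerenv"). A minimal sketch of the pattern;
// getEnv is a simplified stand-in for the package's GetEnv:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func getEnv(key, dflt string, combineWith ...string) string {
	value := os.Getenv(key)
	if value == "" {
		value = dflt
	}
	return filepath.Join(append([]string{value}, combineWith...)...)
}

func main() {
	os.Setenv("HOST_ROOT", "/host")
	fmt.Println(getEnv("HOST_ROOT", "/", ".dockerenv")) // /host/.dockerenv
}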
} // getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go index 3b2b8e0..f1a7845 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go @@ -14,11 +14,7 @@ import ( ) func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { - sysctl, err := exec.LookPath("sysctl") - if err != nil { - return []string{}, err - } - cmd := exec.CommandContext(ctx, sysctl, "-n", mib) + cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) cmd.Env = getSysctrlEnv(os.Environ()) out, err := cmd.Output() if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go index 2510f9d..f590e2e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go @@ -29,11 +29,7 @@ func SysctlUint(mib string) (uint64, error) { } func DoSysctrl(mib string) ([]string, error) { - sysctl, err := exec.LookPath("sysctl") - if err != nil { - return []string{}, err - } - cmd := exec.Command(sysctl, "-n", mib) + cmd := exec.Command("sysctl", "-n", mib) cmd.Env = getSysctrlEnv(os.Environ()) out, err := cmd.Output() if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go index ffdc931..fa6373b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go @@ -12,15 +12,12 @@ import ( "strconv" "strings" "sync" + "syscall" "time" ) func DoSysctrl(mib string) ([]string, error) { - sysctl, err := exec.LookPath("sysctl") - if err != nil { - return []string{}, err - } - cmd := exec.Command(sysctl, "-n", mib) + cmd := exec.Command("sysctl", "-n", mib) cmd.Env = getSysctrlEnv(os.Environ()) out, err := cmd.Output() if err != nil { @@ -56,7 +53,7 @@ func NumProcs() (uint64, error) { } func BootTimeWithContext(ctx context.Context) (uint64, error) { - system, role, err := Virtualization() + system, role, err := VirtualizationWithContext(ctx) if err != nil { return 0, err } @@ -72,6 +69,17 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { filename := HostProc(statFile) lines, err := ReadLines(filename) + if os.IsPermission(err) { + var info syscall.Sysinfo_t + err := syscall.Sysinfo(&info) + if err != nil { + return 0, err + } + + currentTime := time.Now().UnixNano() / int64(time.Second) + t := currentTime - int64(info.Uptime) + return uint64(t), nil + } if err != nil { return 0, err } @@ -153,6 +161,9 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { if StringsContains(contents, "kvm") { system = "kvm" role = "host" + } else if StringsContains(contents, "hv_util") { + system = "hyperv" + role = "guest" } else if StringsContains(contents, "vboxdrv") { system = "vbox" role = "host" @@ -248,6 +259,11 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { } } + if PathExists(HostRoot(".dockerenv")) { + system = "docker" + role = "guest" + } + // before returning for the first time, cache the system and role cachedVirtOnce.Do(func() { cachedVirtMutex.Lock() @@ -278,6 +294,12 @@ func 
GetOSRelease() (platform string, version string, err error) { version = trimQuotes(field[1]) } } + + // cleanup amazon ID + if platform == "amzn" { + platform = "amazon" + } + return platform, version, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go index cbd3d28..58d76f3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go @@ -13,11 +13,7 @@ import ( ) func DoSysctrl(mib string) ([]string, error) { - sysctl, err := exec.LookPath("sysctl") - if err != nil { - return []string{}, err - } - cmd := exec.Command(sysctl, "-n", mib) + cmd := exec.Command("sysctl", "-n", mib) cmd.Env = getSysctrlEnv(os.Environ()) out, err := cmd.Output() if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go index e0170f9..4af7e5c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go @@ -5,6 +5,7 @@ package common import ( "context" + "errors" "os/exec" "strconv" "strings" @@ -18,12 +19,11 @@ func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args .. cmd = []string{"-a", "-n", "-P", "-p", strconv.Itoa(int(pid))} } cmd = append(cmd, args...) - lsof, err := exec.LookPath("lsof") - if err != nil { - return []string{}, err - } - out, err := invoke.CommandWithContext(ctx, lsof, cmd...) + out, err := invoke.CommandWithContext(ctx, "lsof", cmd...) if err != nil { + if errors.Is(err, exec.ErrNotFound) { + return []string{}, err + } // if no pid found, lsof returns code 1. if err.Error() == "exit status 1" && len(out) == 0 { return []string{}, nil @@ -42,12 +42,7 @@ func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args .. } func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) { - cmd := []string{"-P", strconv.Itoa(int(pid))} - pgrep, err := exec.LookPath("pgrep") - if err != nil { - return []int32{}, err - } - out, err := invoke.CommandWithContext(ctx, pgrep, cmd...) + out, err := invoke.CommandWithContext(ctx, "pgrep", "-P", strconv.Itoa(int(pid))) if err != nil { return []int32{}, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go new file mode 100644 index 0000000..147cfdc --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go @@ -0,0 +1,10 @@ +package common + +import "unsafe" + +// IsLittleEndian checks if the current platform uses little-endian. 
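// --- standalone sketch (not part of the vendored patch) ---
// The probe below stores 0x0011 in an int16 and reads its first byte:
// 0x11 means the low-order byte comes first, i.e. little-endian. The
// same trick can select a matching encoding/binary byte order:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

func nativeOrder() binary.ByteOrder {
	var x int16 = 0x0011
	if *(*byte)(unsafe.Pointer(&x)) == 0x11 {
		return binary.LittleEndian
	}
	return binary.BigEndian
}

func main() {
	buf := make([]byte, 2)
	nativeOrder().PutUint16(buf, 0x0102)
	fmt.Printf("% x\n", buf) // "02 01" on little-endian hosts
}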
+// copied from https://github.com/ntrrg/ntgo/blob/v0.8.0/runtime/infrastructure.go#L16 (MIT License) +func IsLittleEndian() bool { + var x int16 = 0x0011 + return *(*byte)(unsafe.Pointer(&x)) == 0x11 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/types.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go similarity index 59% rename from vendor/github.com/shirou/gopsutil/v3/host/types.go rename to vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go index c2e7c0b..a4aaada 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/types.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go @@ -1,11 +1,10 @@ -package host +package common -import ( - "fmt" -) +import "fmt" type Warnings struct { - List []error + List []error + Verbose bool } func (w *Warnings) Add(err error) { @@ -20,5 +19,12 @@ func (w *Warnings) Reference() error { } func (w *Warnings) Error() string { + if w.Verbose { + str := "" + for i, e := range w.List { + str += fmt.Sprintf("\tError %d: %s\n", i, e.Error()) + } + return str + } return fmt.Sprintf("Number of warnings: %v", len(w.List)) } diff --git a/vendor/github.com/shirou/gopsutil/v3/load/load_aix.go b/vendor/github.com/shirou/gopsutil/v3/load/load_aix.go index e7f9f94..78b3912 100644 --- a/vendor/github.com/shirou/gopsutil/v3/load/load_aix.go +++ b/vendor/github.com/shirou/gopsutil/v3/load/load_aix.go @@ -3,69 +3,17 @@ package load -/* -#cgo LDFLAGS: -L/usr/lib -lperfstat - -#include -#include -*/ -import "C" - import ( "context" - "unsafe" - - "github.com/power-devops/perfstat" ) func Avg() (*AvgStat, error) { return AvgWithContext(context.Background()) } -func AvgWithContext(ctx context.Context) (*AvgStat, error) { - c, err := perfstat.CpuTotalStat() - if err != nil { - return nil, err - } - ret := &AvgStat{ - Load1: float64(c.LoadAvg1), - Load5: float64(c.LoadAvg5), - Load15: float64(c.LoadAvg15), - } - - return ret, nil -} - // Misc returns miscellaneous host-wide statistics. // darwin use ps command to get process running/blocked count. // Almost same as Darwin implementation, but state is different. 
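For context, a minimal standalone sketch of the byte-probe technique used by the new `IsLittleEndian` helper added in `endian.go` above; this is illustrative only and not part of the vendored patch:

```go
package main

import (
	"fmt"
	"unsafe"
)

// isLittleEndian mirrors internal/common.IsLittleEndian: store 0x0011 in an
// int16 and inspect the first byte in memory. On a little-endian CPU the
// low-order byte (0x11) is stored first.
func isLittleEndian() bool {
	var x int16 = 0x0011
	return *(*byte)(unsafe.Pointer(&x)) == 0x11
}

func main() {
	fmt.Println("little-endian:", isLittleEndian()) // true on amd64/arm64
}
```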
func Misc() (*MiscStat, error) { return MiscWithContext(context.Background()) } - -func MiscWithContext(ctx context.Context) (*MiscStat, error) { - info := C.struct_procentry64{} - cpid := C.pid_t(0) - - ret := MiscStat{} - for { - // getprocs first argument is a void* - num, err := C.getprocs64(unsafe.Pointer(&info), C.sizeof_struct_procentry64, nil, 0, &cpid, 1) - if err != nil { - return nil, err - } - - ret.ProcsTotal++ - switch info.pi_state { - case C.SACTIVE: - ret.ProcsRunning++ - case C.SSTOP: - ret.ProcsBlocked++ - } - - if num == 0 { - break - } - } - return &ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/load/load_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v3/load/load_aix_cgo.go new file mode 100644 index 0000000..bbbf287 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/load/load_aix_cgo.go @@ -0,0 +1,60 @@ +//go:build aix && cgo +// +build aix,cgo + +package load + +/* +#cgo LDFLAGS: -L/usr/lib -lperfstat + +#include +#include +*/ +import "C" + +import ( + "context" + "unsafe" + + "github.com/power-devops/perfstat" +) + +func AvgWithContext(ctx context.Context) (*AvgStat, error) { + c, err := perfstat.CpuTotalStat() + if err != nil { + return nil, err + } + ret := &AvgStat{ + Load1: float64(c.LoadAvg1), + Load5: float64(c.LoadAvg5), + Load15: float64(c.LoadAvg15), + } + + return ret, nil +} + +func MiscWithContext(ctx context.Context) (*MiscStat, error) { + info := C.struct_procentry64{} + cpid := C.pid_t(0) + + ret := MiscStat{} + for { + // getprocs first argument is a void* + num, err := C.getprocs64(unsafe.Pointer(&info), C.sizeof_struct_procentry64, nil, 0, &cpid, 1) + if err != nil { + return nil, err + } + + ret.ProcsTotal++ + switch info.pi_state { + case C.SACTIVE: + ret.ProcsRunning++ + case C.SSTOP: + ret.ProcsBlocked++ + } + + if num == 0 { + break + } + } + return &ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/load/load_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/load/load_aix_nocgo.go new file mode 100644 index 0000000..bfc4850 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/load/load_aix_nocgo.go @@ -0,0 +1,66 @@ +//go:build aix && !cgo +// +build aix,!cgo + +package load + +import ( + "context" + "regexp" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v3/internal/common" +) + +var separator = regexp.MustCompile(`,?\s+`) + +func AvgWithContext(ctx context.Context) (*AvgStat, error) { + line, err := invoke.CommandWithContext(ctx, "uptime") + if err != nil { + return nil, err + } + + idx := strings.Index(string(line), "load average:") + if idx < 0 { + return nil, common.ErrNotImplementedError + } + ret := &AvgStat{} + + p := separator.Split(string(line[idx:len(line)]), 5) + if 4 < len(p) && p[0] == "load" && p[1] == "average:" { + if t, err := strconv.ParseFloat(p[2], 64); err == nil { + ret.Load1 = t + } + if t, err := strconv.ParseFloat(p[3], 64); err == nil { + ret.Load5 = t + } + if t, err := strconv.ParseFloat(p[4], 64); err == nil { + ret.Load15 = t + } + return ret, nil + } + + return nil, common.ErrNotImplementedError +} + +func MiscWithContext(ctx context.Context) (*MiscStat, error) { + out, err := invoke.CommandWithContext(ctx, "ps", "-Ao", "state") + if err != nil { + return nil, err + } + + ret := &MiscStat{} + for _, line := range strings.Split(string(out), "\n") { + ret.ProcsTotal++ + switch line { + case "R": + case "A": + ret.ProcsRunning++ + case "T": + ret.ProcsBlocked++ + default: + continue + } + } + return ret, nil +} diff --git 
a/vendor/github.com/shirou/gopsutil/v3/load/load_bsd.go b/vendor/github.com/shirou/gopsutil/v3/load/load_bsd.go index 5c610a5..51d9286 100644 --- a/vendor/github.com/shirou/gopsutil/v3/load/load_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/load/load_bsd.go @@ -5,7 +5,6 @@ package load import ( "context" - "os/exec" "strings" "unsafe" @@ -52,11 +51,7 @@ func Misc() (*MiscStat, error) { } func MiscWithContext(ctx context.Context) (*MiscStat, error) { - bin, err := exec.LookPath("ps") - if err != nil { - return nil, err - } - out, err := invoke.CommandWithContext(ctx, bin, "axo", "state") + out, err := invoke.CommandWithContext(ctx, "ps", "axo", "state") if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/load/load_darwin.go b/vendor/github.com/shirou/gopsutil/v3/load/load_darwin.go index e0df3c9..ce80188 100644 --- a/vendor/github.com/shirou/gopsutil/v3/load/load_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/load/load_darwin.go @@ -5,7 +5,6 @@ package load import ( "context" - "os/exec" "strings" "unsafe" @@ -48,11 +47,7 @@ func Misc() (*MiscStat, error) { } func MiscWithContext(ctx context.Context) (*MiscStat, error) { - bin, err := exec.LookPath("ps") - if err != nil { - return nil, err - } - out, err := invoke.CommandWithContext(ctx, bin, "axo", "state") + out, err := invoke.CommandWithContext(ctx, "ps", "axo", "state") if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/load/load_solaris.go b/vendor/github.com/shirou/gopsutil/v3/load/load_solaris.go index aa6cd1e..99b339b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/load/load_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/load/load_solaris.go @@ -7,7 +7,6 @@ import ( "bufio" "bytes" "context" - "os/exec" "strconv" "strings" ) @@ -17,12 +16,7 @@ func Avg() (*AvgStat, error) { } func AvgWithContext(ctx context.Context) (*AvgStat, error) { - kstat, err := exec.LookPath("kstat") - if err != nil { - return nil, err - } - - out, err := invoke.CommandWithContext(ctx, kstat, "-p", "unix:0:system_misc:avenrun_*") + out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "unix:0:system_misc:avenrun_*") if err != nil { return nil, err } @@ -63,11 +57,7 @@ func Misc() (*MiscStat, error) { } func MiscWithContext(ctx context.Context) (*MiscStat, error) { - bin, err := exec.LookPath("ps") - if err != nil { - return nil, err - } - out, err := invoke.CommandWithContext(ctx, bin, "-efo", "s") + out, err := invoke.CommandWithContext(ctx, "ps", "-efo", "s") if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/load/load_windows.go b/vendor/github.com/shirou/gopsutil/v3/load/load_windows.go index 3ff0c0a..b484838 100644 --- a/vendor/github.com/shirou/gopsutil/v3/load/load_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/load/load_windows.go @@ -26,7 +26,7 @@ var ( // TODO instead of this goroutine, we can register a Win32 counter just as psutil does // see https://psutil.readthedocs.io/en/latest/#psutil.getloadavg // code https://github.com/giampaolo/psutil/blob/8415355c8badc9c94418b19bdf26e622f06f0cce/psutil/arch/windows/wmi.c -func loadAvgGoroutine() { +func loadAvgGoroutine(ctx context.Context) { var ( samplingFrequency time.Duration = 5 * time.Second loadAvgFactor1M float64 = 1 / math.Exp(samplingFrequency.Seconds()/time.Minute.Seconds()) @@ -37,20 +37,30 @@ func loadAvgGoroutine() { counter, err := common.ProcessorQueueLengthCounter() if err != nil || counter == nil { - log.Println("gopsutil: unexpected processor queue length 
counter error, please file an issue on github: err") + log.Printf("unexpected processor queue length counter error, %v\n", err) return } tick := time.NewTicker(samplingFrequency).C - for { + + f := func() { currentLoad, err = counter.GetValue() - loadAvgMutex.Lock() loadErr = err + loadAvgMutex.Lock() loadAvg1M = loadAvg1M*loadAvgFactor1M + currentLoad*(1-loadAvgFactor1M) loadAvg5M = loadAvg5M*loadAvgFactor5M + currentLoad*(1-loadAvgFactor5M) loadAvg15M = loadAvg15M*loadAvgFactor15M + currentLoad*(1-loadAvgFactor15M) loadAvgMutex.Unlock() - <-tick + } + + f() // run first time + for { + select { + case <-ctx.Done(): + return + case <-tick: + f() + } } } @@ -61,7 +71,7 @@ func Avg() (*AvgStat, error) { func AvgWithContext(ctx context.Context) (*AvgStat, error) { loadAvgGoroutineOnce.Do(func() { - go loadAvgGoroutine() + go loadAvgGoroutine(ctx) }) loadAvgMutex.RLock() defer loadAvgMutex.RUnlock() diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem.go index e56d5d3..ff960da 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem.go @@ -75,6 +75,8 @@ type VirtualMemoryStat struct { VmallocChunk uint64 `json:"vmallocChunk"` HugePagesTotal uint64 `json:"hugePagesTotal"` HugePagesFree uint64 `json:"hugePagesFree"` + HugePagesRsvd uint64 `json:"hugePagesRsvd"` + HugePagesSurp uint64 `json:"hugePagesSurp"` HugePageSize uint64 `json:"hugePageSize"` } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go index 9b2196d..22a6a4e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go @@ -5,55 +5,12 @@ package mem import ( "context" - - "github.com/power-devops/perfstat" ) func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - m, err := perfstat.MemoryTotalStat() - if err != nil { - return nil, err - } - pagesize := uint64(4096) - ret := VirtualMemoryStat{ - Total: uint64(m.RealTotal) * pagesize, - Available: uint64(m.RealAvailable) * pagesize, - Free: uint64(m.RealFree) * pagesize, - Used: uint64(m.RealInUse) * pagesize, - UsedPercent: 100 * float64(m.RealInUse) / float64(m.RealTotal), - Active: uint64(m.VirtualActive) * pagesize, - SwapTotal: uint64(m.PgSpTotal) * pagesize, - SwapFree: uint64(m.PgSpFree) * pagesize, - } - return &ret, nil -} - func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } - -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - m, err := perfstat.MemoryTotalStat() - if err != nil { - return nil, err - } - pagesize := uint64(4096) - swapUsed := uint64(m.PgSpTotal-m.PgSpFree-m.PgSpRsvd) * pagesize - swapTotal := uint64(m.PgSpTotal) * pagesize - ret := SwapMemoryStat{ - Total: swapTotal, - Free: uint64(m.PgSpFree) * pagesize, - Used: swapUsed, - UsedPercent: float64(100*swapUsed) / float64(swapTotal), - Sin: uint64(m.PgSpIn), - Sout: uint64(m.PgSpOut), - PgIn: uint64(m.PageIn), - PgOut: uint64(m.PageOut), - PgFault: uint64(m.PageFaults), - } - return &ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go new file mode 100644 index 0000000..67e11df --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go @@ -0,0 +1,51 @@ +//go:build aix && 
cgo +// +build aix,cgo + +package mem + +import ( + "context" + + "github.com/power-devops/perfstat" +) + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + m, err := perfstat.MemoryTotalStat() + if err != nil { + return nil, err + } + pagesize := uint64(4096) + ret := VirtualMemoryStat{ + Total: uint64(m.RealTotal) * pagesize, + Available: uint64(m.RealAvailable) * pagesize, + Free: uint64(m.RealFree) * pagesize, + Used: uint64(m.RealInUse) * pagesize, + UsedPercent: 100 * float64(m.RealInUse) / float64(m.RealTotal), + Active: uint64(m.VirtualActive) * pagesize, + SwapTotal: uint64(m.PgSpTotal) * pagesize, + SwapFree: uint64(m.PgSpFree) * pagesize, + } + return &ret, nil +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + m, err := perfstat.MemoryTotalStat() + if err != nil { + return nil, err + } + pagesize := uint64(4096) + swapUsed := uint64(m.PgSpTotal-m.PgSpFree-m.PgSpRsvd) * pagesize + swapTotal := uint64(m.PgSpTotal) * pagesize + ret := SwapMemoryStat{ + Total: swapTotal, + Free: uint64(m.PgSpFree) * pagesize, + Used: swapUsed, + UsedPercent: float64(100*swapUsed) / float64(swapTotal), + Sin: uint64(m.PgSpIn), + Sout: uint64(m.PgSpOut), + PgIn: uint64(m.PageIn), + PgOut: uint64(m.PageOut), + PgFault: uint64(m.PageFaults), + } + return &ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go new file mode 100644 index 0000000..09ffd8e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go @@ -0,0 +1,81 @@ +//go:build aix && !cgo +// +build aix,!cgo + +package mem + +import ( + "context" + "regexp" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v3/internal/common" +) + +var whiteSpaces = regexp.MustCompile(`\s+`) + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + vmem, swap, err := callSVMon(ctx) + if err != nil { + return nil, err + } + if vmem.Total == 0 { + return nil, common.ErrNotImplementedError + } + vmem.SwapTotal = swap.Total + vmem.SwapFree = swap.Free + return vmem, nil +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + _, swap, err := callSVMon(ctx) + if err != nil { + return nil, err + } + if swap.Total == 0 { + return nil, common.ErrNotImplementedError + } + return swap, nil +} + +func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) { + out, err := invoke.CommandWithContext(ctx, "svmon", "-G") + if err != nil { + return nil, nil, err + } + + pagesize := uint64(4096) + vmem := &VirtualMemoryStat{} + swap := &SwapMemoryStat{} + for _, line := range strings.Split(string(out), "\n") { + if strings.HasPrefix(line, "memory") { + p := whiteSpaces.Split(line, 7) + if len(p) > 2 { + if t, err := strconv.ParseUint(p[1], 10, 64); err == nil { + vmem.Total = t * pagesize + } + if t, err := strconv.ParseUint(p[2], 10, 64); err == nil { + vmem.Used = t * pagesize + if vmem.Total > 0 { + vmem.UsedPercent = 100 * float64(vmem.Used) / float64(vmem.Total) + } + } + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + vmem.Free = t * pagesize + } + } + } else if strings.HasPrefix(line, "pg space") { + p := whiteSpaces.Split(line, 4) + if len(p) > 3 { + if t, err := strconv.ParseUint(p[2], 10, 64); err == nil { + swap.Total = t * pagesize + } + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + swap.Free = swap.Total - t * pagesize + } + } + break + } + } + return vmem, swap, nil +} diff --git 
a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go index a0b8be0..ce930fb 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go @@ -6,7 +6,6 @@ package mem import ( "context" "fmt" - "os/exec" "strconv" "strings" ) @@ -25,11 +24,7 @@ func SwapDevices() ([]*SwapDevice, error) { } func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { - swapCommandPath, err := exec.LookPath(swapCommand) - if err != nil { - return nil, fmt.Errorf("could not find command %q: %w", swapCommand, err) - } - output, err := invoke.CommandWithContext(ctx, swapCommandPath, "-lk") + output, err := invoke.CommandWithContext(ctx, swapCommand, "-lk") if err != nil { return nil, fmt.Errorf("could not execute %q: %w", swapCommand, err) } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go index 82c3824..e5da7dc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go @@ -5,6 +5,7 @@ package mem /* #include +#include */ import "C" @@ -12,8 +13,6 @@ import ( "context" "fmt" "unsafe" - - "golang.org/x/sys/unix" ) // VirtualMemory returns VirtualmemoryStat. @@ -34,7 +33,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { return nil, fmt.Errorf("host_statistics error=%d", status) } - pageSize := uint64(unix.Getpagesize()) + pageSize := uint64(C.vm_kernel_page_size) total, err := getHwMemsize() if err != nil { return nil, err diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go index be926f2..c939316 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go @@ -5,7 +5,6 @@ package mem import ( "context" - "os/exec" "strconv" "strings" @@ -14,11 +13,7 @@ import ( // Runs vm_stat and returns Free and inactive pages func getVMStat(vms *VirtualMemoryStat) error { - vm_stat, err := exec.LookPath("vm_stat") - if err != nil { - return err - } - out, err := invoke.Command(vm_stat) + out, err := invoke.Command("vm_stat") if err != nil { return err } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go index 9a5967c..9a5d693 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go @@ -292,6 +292,18 @@ func fillFromMeminfoWithContext() (*VirtualMemoryStat, *VirtualMemoryExStat, err return ret, retEx, err } ret.HugePagesFree = t + case "HugePages_Rsvd": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HugePagesRsvd = t + case "HugePages_Surp": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HugePagesSurp = t case "Hugepagesize": t, err := strconv.ParseUint(value, 10, 64) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go index 4d9b6cc..9764492 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go @@ -9,8 +9,6 @@ import ( "encoding/binary" "errors" "fmt" - "os/exec" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" ) @@ 
-76,12 +74,7 @@ func SwapMemory() (*SwapMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - swapctl, err := exec.LookPath("swapctl") - if err != nil { - return nil, err - } - - out, err := invoke.CommandWithContext(ctx, swapctl, "-sk") + out, err := invoke.CommandWithContext(ctx, "swapctl", "-sk") if err != nil { return &SwapMemoryStat{}, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go new file mode 100644 index 0000000..2488f18 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go @@ -0,0 +1,38 @@ +//go:build openbsd && arm +// +build openbsd,arm + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs mem/types_openbsd.go + +package mem + +const ( + CTLVfs = 10 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfBcachestats = 0x90 +) + +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 + Highflips int64 + Highflops int64 + Dmaflips int64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go index 7f6a6fd..c911267 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go @@ -5,13 +5,14 @@ package mem import ( "context" + "errors" "fmt" - "os/exec" "regexp" "strconv" "strings" "github.com/shirou/gopsutil/v3/internal/common" + "github.com/tklauser/go-sysconf" ) // VirtualMemory for Solaris is a minimal implementation which only returns @@ -34,6 +35,13 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { return nil, err } result.Total = cap + freemem, err := globalZoneFreeMemory(ctx) + if err != nil { + return nil, err + } + result.Available = freemem + result.Free = freemem + result.Used = result.Total - result.Free } else { cap, err := nonGlobalZoneMemoryCapacity() if err != nil { @@ -54,13 +62,8 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { } func zoneName() (string, error) { - zonename, err := exec.LookPath("zonename") - if err != nil { - return "", err - } - ctx := context.Background() - out, err := invoke.CommandWithContext(ctx, zonename) + out, err := invoke.CommandWithContext(ctx, "zonename") if err != nil { return "", err } @@ -71,20 +74,15 @@ func zoneName() (string, error) { var globalZoneMemoryCapacityMatch = regexp.MustCompile(`[Mm]emory size: (\d+) Megabytes`) func globalZoneMemoryCapacity() (uint64, error) { - prtconf, err := exec.LookPath("prtconf") - if err != nil { - return 0, err - } - ctx := context.Background() - out, err := invoke.CommandWithContext(ctx, prtconf) + out, err := invoke.CommandWithContext(ctx, "prtconf") if err != nil { return 0, err } match := globalZoneMemoryCapacityMatch.FindAllStringSubmatch(string(out), -1) if len(match) != 1 { - return 0, fmt.Errorf("memory size not contained in output of %q", prtconf) + return 0, errors.New("memory size not contained in output of prtconf") } totalMB, err := strconv.ParseUint(match[0][1], 10, 64) @@ -95,16 +93,30 @@ func globalZoneMemoryCapacity() (uint64, error) { return totalMB * 1024 * 1024, nil } -var kstatMatch = regexp.MustCompile(`(\S+)\s+(\S*)`) +func 
globalZoneFreeMemory(ctx context.Context) (uint64, error) { + output, err := invoke.CommandWithContext(ctx, "pagesize") + if err != nil { + return 0, err + } -func nonGlobalZoneMemoryCapacity() (uint64, error) { - kstat, err := exec.LookPath("kstat") + pagesize, err := strconv.ParseUint(strings.TrimSpace(string(output)), 10, 64) if err != nil { return 0, err } + free, err := sysconf.Sysconf(sysconf.SC_AVPHYS_PAGES) + if err != nil { + return 0, err + } + + return uint64(free) * pagesize, nil +} + +var kstatMatch = regexp.MustCompile(`(\S+)\s+(\S*)`) + +func nonGlobalZoneMemoryCapacity() (uint64, error) { ctx := context.Background() - out, err := invoke.CommandWithContext(ctx, kstat, "-p", "-c", "zone_memory_cap", "memory_cap:*:*:physcap") + out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "-c", "zone_memory_cap", "memory_cap:*:*:physcap") if err != nil { return 0, err } @@ -141,11 +153,7 @@ func SwapDevices() ([]*SwapDevice, error) { } func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { - swapCommandPath, err := exec.LookPath(swapCommand) - if err != nil { - return nil, fmt.Errorf("could not find command %q: %w", swapCommand, err) - } - output, err := invoke.CommandWithContext(ctx, swapCommandPath, "-l") + output, err := invoke.CommandWithContext(ctx, swapCommand, "-l") if err != nil { return nil, fmt.Errorf("could not execute %q: %w", swapCommand, err) } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net.go b/vendor/github.com/shirou/gopsutil/v3/net/net.go index d2a1b82..0f3a62f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net.go @@ -22,7 +22,6 @@ type IOCountersStat struct { Dropout uint64 `json:"dropout"` // total number of outgoing packets which were dropped (always 0 on OSX and BSD) Fifoin uint64 `json:"fifoin"` // total number of FIFO buffers errors while receiving Fifoout uint64 `json:"fifoout"` // total number of FIFO buffers errors while sending - } // Addr is implemented compatibility to psutil diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go b/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go index 84e3609..81feaa8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go @@ -6,7 +6,6 @@ package net import ( "context" "fmt" - "os/exec" "regexp" "strconv" "strings" @@ -15,96 +14,10 @@ import ( "github.com/shirou/gopsutil/v3/internal/common" ) -func parseNetstatI(output string) ([]IOCountersStat, error) { - lines := strings.Split(string(output), "\n") - ret := make([]IOCountersStat, 0, len(lines)-1) - exists := make([]string, 0, len(ret)) - - // Check first line is header - if len(lines) > 0 && strings.Fields(lines[0])[0] != "Name" { - return nil, fmt.Errorf("not a 'netstat -i' output") - } - - for _, line := range lines[1:] { - values := strings.Fields(line) - if len(values) < 1 || values[0] == "Name" { - continue - } - if common.StringsHas(exists, values[0]) { - // skip if already get - continue - } - exists = append(exists, values[0]) - - if len(values) < 9 { - continue - } - - base := 1 - // sometimes Address is omitted - if len(values) < 10 { - base = 0 - } - - parsed := make([]uint64, 0, 5) - vv := []string{ - values[base+3], // Ipkts == PacketsRecv - values[base+4], // Ierrs == Errin - values[base+5], // Opkts == PacketsSent - values[base+6], // Oerrs == Errout - values[base+8], // Drops == Dropout - } - - for _, target := range vv { - if target == "-" { - parsed = append(parsed, 0) - continue 
- } - - t, err := strconv.ParseUint(target, 10, 64) - if err != nil { - return nil, err - } - parsed = append(parsed, t) - } - - n := IOCountersStat{ - Name: values[0], - PacketsRecv: parsed[0], - Errin: parsed[1], - PacketsSent: parsed[2], - Errout: parsed[3], - Dropout: parsed[4], - } - ret = append(ret, n) - } - return ret, nil -} - func IOCounters(pernic bool) ([]IOCountersStat, error) { return IOCountersWithContext(context.Background(), pernic) } -func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { - netstat, err := exec.LookPath("netstat") - if err != nil { - return nil, err - } - out, err := invoke.CommandWithContext(ctx, netstat, "-idn") - if err != nil { - return nil, err - } - - iocounters, err := parseNetstatI(string(out)) - if err != nil { - return nil, err - } - if pernic == false { - return getIOCountersAll(iocounters) - } - return iocounters, nil -} - // IOCountersByFile exists just for compatibility with Linux. func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { return IOCountersByFileWithContext(context.Background(), pernic, filename) @@ -360,11 +273,7 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, args = append(args, "-funix") } - netstat, err := exec.LookPath("netstat") - if err != nil { - return nil, err - } - out, err := invoke.CommandWithContext(ctx, netstat, args...) + out, err := invoke.CommandWithContext(ctx, "netstat", args...) if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go new file mode 100644 index 0000000..8cf8c91 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go @@ -0,0 +1,36 @@ +//go:build aix && cgo +// +build aix,cgo + +package net + +import ( + "context" + + "github.com/power-devops/perfstat" +) + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + ifs, err := perfstat.NetIfaceStat() + if err != nil { + return nil, err + } + + iocounters := make([]IOCountersStat, 0, len(ifs)) + for _, netif := range ifs { + n := IOCountersStat{ + Name: netif.Name, + BytesSent: uint64(netif.OBytes), + BytesRecv: uint64(netif.IBytes), + PacketsSent: uint64(netif.OPackets), + PacketsRecv: uint64(netif.IPackets), + Errin: uint64(netif.OErrors), + Errout: uint64(netif.IErrors), + Dropout: uint64(netif.XmitDrops), + } + iocounters = append(iocounters, n) + } + if pernic == false { + return getIOCountersAll(iocounters) + } + return iocounters, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go new file mode 100644 index 0000000..e3fce90 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go @@ -0,0 +1,95 @@ +//go:build aix && !cgo +// +build aix,!cgo + +package net + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v3/internal/common" +) + +func parseNetstatI(output string) ([]IOCountersStat, error) { + lines := strings.Split(string(output), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + exists := make([]string, 0, len(ret)) + + // Check first line is header + if len(lines) > 0 && strings.Fields(lines[0])[0] != "Name" { + return nil, fmt.Errorf("not a 'netstat -i' output") + } + + for _, line := range lines[1:] { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if common.StringsHas(exists, values[0]) { 
+ // skip if already get + continue + } + exists = append(exists, values[0]) + + if len(values) < 9 { + continue + } + + base := 1 + // sometimes Address is omitted + if len(values) < 10 { + base = 0 + } + + parsed := make([]uint64, 0, 5) + vv := []string{ + values[base+3], // Ipkts == PacketsRecv + values[base+4], // Ierrs == Errin + values[base+5], // Opkts == PacketsSent + values[base+6], // Oerrs == Errout + values[base+8], // Drops == Dropout + } + + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return nil, err + } + parsed = append(parsed, t) + } + + n := IOCountersStat{ + Name: values[0], + PacketsRecv: parsed[0], + Errin: parsed[1], + PacketsSent: parsed[2], + Errout: parsed[3], + Dropout: parsed[4], + } + ret = append(ret, n) + } + return ret, nil +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + out, err := invoke.CommandWithContext(ctx, "netstat", "-idn") + if err != nil { + return nil, err + } + + iocounters, err := parseNetstatI(string(out)) + if err != nil { + return nil, err + } + if pernic == false { + return getIOCountersAll(iocounters) + } + return iocounters, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go index ec9ba41..1c8d4f4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go @@ -207,11 +207,7 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } } else { // duplicated interface, list all interfaces - ifconfig, err := exec.LookPath("ifconfig") - if err != nil { - return nil, err - } - if out, err = invoke.CommandWithContext(ctx, ifconfig, "-l"); err != nil { + if out, err = invoke.CommandWithContext(ctx, "ifconfig", "-l"); err != nil { return nil, err } interfaceNames := strings.Fields(strings.TrimRight(string(out), endOfLine)) diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go index 58325f6..e136be1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go @@ -1,5 +1,5 @@ -//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows -// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows +//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris +// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows,!solaris package net diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go index 50fe93a..7f31851 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go @@ -5,7 +5,6 @@ package net import ( "context" - "os/exec" "strconv" "strings" @@ -17,11 +16,7 @@ func IOCounters(pernic bool) ([]IOCountersStat, error) { } func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { - netstat, err := exec.LookPath("netstat") - if err != nil { - return nil, err - } - out, err := invoke.CommandWithContext(ctx, netstat, "-ibdnW") + out, err := invoke.CommandWithContext(ctx, "netstat", "-ibdnW") if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go index 0d909fc..c7cd0db 
100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go @@ -161,7 +161,7 @@ var netProtocols = []string{ // If protocols is empty then all protocols are returned, otherwise // just the protocols in the list are returned. // Available protocols: -// ip,icmp,icmpmsg,tcp,udp,udplite +// [ip,icmp,icmpmsg,tcp,udp,udplite] func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { return ProtoCountersWithContext(context.Background(), protocols) } @@ -392,7 +392,7 @@ func Connections(kind string) ([]ConnectionStat, error) { } func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return ConnectionsPid(kind, 0) + return ConnectionsPidWithContext(ctx, kind, 0) } // Return a list of network connections opened returning at most `max` @@ -402,7 +402,7 @@ func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { } func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMax(kind, 0, max) + return ConnectionsPidMaxWithContext(ctx, kind, 0, max) } // Return a list of network connections opened, omitting `Uids`. @@ -463,7 +463,7 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p var err error var inodes map[string][]inodeMap if pid == 0 { - inodes, err = getProcInodesAll(root, max) + inodes, err = getProcInodesAllWithContext(ctx, root, max) } else { inodes, err = getProcInodes(root, pid, max) if len(inodes) == 0 { @@ -474,10 +474,14 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p if err != nil { return nil, fmt.Errorf("cound not get pid(s), %d: %w", pid, err) } - return statsFromInodes(root, pid, tmap, inodes, skipUids) + return statsFromInodesWithContext(ctx, root, pid, tmap, inodes, skipUids) } func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { + return statsFromInodesWithContext(context.Background(), root, pid, tmap, inodes, skipUids) +} + +func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { dupCheckMap := make(map[string]struct{}) var ret []ConnectionStat @@ -493,7 +497,7 @@ func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inode } switch t.family { case syscall.AF_INET, syscall.AF_INET6: - ls, err = processInet(path, t, inodes, pid) + ls, err = processInetWithContext(ctx, path, t, inodes, pid) case syscall.AF_UNIX: ls, err = processUnix(path, t, inodes, pid) } @@ -666,7 +670,11 @@ func (p *process) fillFromStatus() error { } func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { - pids, err := Pids() + return getProcInodesAllWithContext(context.Background(), root, max) +} + +func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map[string][]inodeMap, error) { + pids, err := PidsWithContext(ctx) if err != nil { return nil, err } @@ -695,6 +703,10 @@ func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { // "0500000A:0016" -> "10.0.0.5", 22 // "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53 func decodeAddress(family uint32, src string) (Addr, error) { + return decodeAddressWithContext(context.Background(), family, src) +} + +func decodeAddressWithContext(ctx context.Context, family uint32, src 
string) (Addr, error) { t := strings.Split(src, ":") if len(t) != 2 { return Addr{}, fmt.Errorf("does not contain port, %s", src) @@ -709,11 +721,15 @@ func decodeAddress(family uint32, src string) (Addr, error) { return Addr{}, fmt.Errorf("decode error, %w", err) } var ip net.IP - // Assumes this is little_endian + if family == syscall.AF_INET { - ip = net.IP(Reverse(decoded)) + if common.IsLittleEndian() { + ip = net.IP(ReverseWithContext(ctx, decoded)) + } else { + ip = net.IP(decoded) + } } else { // IPv6 - ip, err = parseIPv6HexString(decoded) + ip, err = parseIPv6HexStringWithContext(ctx, decoded) if err != nil { return Addr{}, err } @@ -738,19 +754,27 @@ func ReverseWithContext(ctx context.Context, s []byte) []byte { // parseIPv6HexString parse array of bytes to IPv6 string func parseIPv6HexString(src []byte) (net.IP, error) { + return parseIPv6HexStringWithContext(context.Background(), src) +} + +func parseIPv6HexStringWithContext(ctx context.Context, src []byte) (net.IP, error) { if len(src) != 16 { return nil, fmt.Errorf("invalid IPv6 string") } buf := make([]byte, 0, 16) for i := 0; i < len(src); i += 4 { - r := Reverse(src[i : i+4]) + r := ReverseWithContext(ctx, src[i:i+4]) buf = append(buf, r...) } return net.IP(buf), nil } func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { + return processInetWithContext(context.Background(), file, kind, inodes, filterPid) +} + +func processInetWithContext(ctx context.Context, file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { if strings.HasSuffix(file, "6") && !common.PathExists(file) { // IPv6 not supported, return empty. return []connTmp{}, nil @@ -793,11 +817,11 @@ func processInet(file string, kind netConnectionKindType, inodes map[string][]in } else { status = "NONE" } - la, err := decodeAddress(kind.family, laddr) + la, err := decodeAddressWithContext(ctx, kind.family, laddr) if err != nil { continue } - ra, err := decodeAddress(kind.family, raddr) + ra, err := decodeAddressWithContext(ctx, kind.family, raddr) if err != nil { continue } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go new file mode 100644 index 0000000..7f1f5c8 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go @@ -0,0 +1,143 @@ +//go:build solaris +// +build solaris + +package net + +import ( + "context" + "fmt" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v3/internal/common" +) + +// NetIOCounters returnes network I/O statistics for every network +// interface installed on the system. If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. 
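A minimal usage sketch of the package-level API this new Solaris file implements, assuming the standard gopsutil v3 import path; with `pernic=false` the result is a single summed entry named "all":

```go
package main

import (
	"fmt"
	"log"

	gnet "github.com/shirou/gopsutil/v3/net"
)

func main() {
	// pernic=false sums all interfaces into one IOCountersStat named "all";
	// pernic=true returns one entry per interface instead.
	counters, err := gnet.IOCounters(false)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range counters {
		fmt.Printf("%s: recv=%d bytes sent=%d bytes\n", c.Name, c.BytesRecv, c.BytesSent)
	}
}
```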
+func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + // collect all the net class's links with below statistics + filterstr := "/^(?!vnic)/::phys:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" + if runtime.GOOS == "illumos" { + filterstr = "/[^vnic]/::mac:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" + } + kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-c", "net", "-p", filterstr) + if err != nil { + return nil, fmt.Errorf("cannot execute kstat: %w", err) + } + + lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") + if len(lines) == 0 { + return nil, fmt.Errorf("no interface found") + } + rbytes64arr := make(map[string]uint64) + ipackets64arr := make(map[string]uint64) + idrops64arr := make(map[string]uint64) + ierrorsarr := make(map[string]uint64) + obytes64arr := make(map[string]uint64) + opackets64arr := make(map[string]uint64) + odrops64arr := make(map[string]uint64) + oerrorsarr := make(map[string]uint64) + + re := regexp.MustCompile(`[:\s]+`) + for _, line := range lines { + fields := re.Split(line, -1) + interfaceName := fields[0] + instance := fields[1] + switch fields[3] { + case "rbytes64": + rbytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse rbytes64: %w", err) + } + case "ipackets64": + ipackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse ipackets64: %w", err) + } + case "idrops64": + idrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse idrops64: %w", err) + } + case "ierrors": + ierrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse ierrors: %w", err) + } + case "obytes64": + obytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse obytes64: %w", err) + } + case "opackets64": + opackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse opackets64: %w", err) + } + case "odrops64": + odrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse odrops64: %w", err) + } + case "oerrors": + oerrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse oerrors: %w", err) + } + } + } + ret := make([]IOCountersStat, 0) + for k := range rbytes64arr { + nic := IOCountersStat{ + Name: k, + BytesRecv: rbytes64arr[k], + PacketsRecv: ipackets64arr[k], + Errin: ierrorsarr[k], + Dropin: idrops64arr[k], + BytesSent: obytes64arr[k], + PacketsSent: opackets64arr[k], + Errout: oerrorsarr[k], + Dropout: odrops64arr[k], + } + ret = append(ret, nic) + } + + if !pernic { + return getIOCountersAll(ret) + } + + return ret, nil +} + +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return []ConnectionStat{}, 
common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return []FilterStat{}, common.ErrNotImplementedError +} + +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return []ProtoCountersStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go index 2fd2224..cb846e2 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go @@ -20,7 +20,7 @@ func Connections(kind string) ([]ConnectionStat, error) { } func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return ConnectionsPid(kind, 0) + return ConnectionsPidWithContext(ctx, kind, 0) } // Return a list of network connections opened returning at most `max` diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go index 7c72417..61b340b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go @@ -6,7 +6,6 @@ package process import ( "context" "fmt" - "os/exec" "path/filepath" "strconv" "strings" @@ -112,11 +111,7 @@ func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details pid := p.Pid - ps, err := exec.LookPath("ps") - if err != nil { - return false, err - } - out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) if err != nil { return false, err } @@ -292,11 +287,6 @@ func (p *Process) getKProc() (*unix.KinfoProc, error) { // And splited by Space. Caller have responsibility to manage. // If passed arg pid is 0, get information from all process. func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) { - bin, err := exec.LookPath("ps") - if err != nil { - return [][]string{}, err - } - var cmd []string if pid == 0 { // will get from all processes. cmd = []string{"-ax", "-o", arg} @@ -308,7 +298,7 @@ func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption if nameOption { cmd = append(cmd, "-c") } - out, err := invoke.CommandWithContext(ctx, bin, cmd...) + out, err := invoke.CommandWithContext(ctx, "ps", cmd...) 
if err != nil { return [][]string{}, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_386.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_386.go deleted file mode 100644 index b353e5e..0000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_386.go +++ /dev/null @@ -1,236 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_darwin.go - -package process - -const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 -) - -type Timespec struct { - Sec int64 - Nsec int64 -} - -type Timeval struct { - Sec int64 - Usec int32 - Pad_cgo_0 [4]byte -} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int64 - Ixrss int64 - Idrss int64 - Isrss int64 - Minflt int64 - Majflt int64 - Nswap int64 - Inblock int64 - Oublock int64 - Msgsnd int64 - Msgrcv int64 - Nsignals int64 - Nvcsw int64 - Nivcsw int64 -} - -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type UGid_t uint32 - -type KinfoProc struct { - Proc ExternProc - Eproc Eproc -} - -type Eproc struct { - Paddr *uint64 - Sess *Session - Pcred Upcred - Ucred Uucred - Pad_cgo_0 [4]byte - Vm Vmspace - Ppid int32 - Pgid int32 - Jobc int16 - Pad_cgo_1 [2]byte - Tdev int32 - Tpgid int32 - Pad_cgo_2 [4]byte - Tsess *Session - Wmesg [8]int8 - Xsize int32 - Xrssize int16 - Xccount int16 - Xswrss int16 - Pad_cgo_3 [2]byte - Flag int32 - Login [12]int8 - Spare [4]int32 - Pad_cgo_4 [4]byte -} - -type Proc struct{} - -type Session struct{} - -type ucred struct { - Link _Ctype_struct___0 - Ref uint64 - Posix Posix_cred - Label *Label - Audit Au_session -} - -type Uucred struct { - Ref int32 - UID uint32 - Ngroups int16 - Pad_cgo_0 [2]byte - Groups [16]uint32 -} - -type Upcred struct { - Pc_lock [72]int8 - Pc_ucred *ucred - P_ruid uint32 - P_svuid uint32 - P_rgid uint32 - P_svgid uint32 - P_refcnt int32 - Pad_cgo_0 [4]byte -} - -type Vmspace struct { - Dummy int32 - Pad_cgo_0 [4]byte - Dummy2 *int8 - Dummy3 [5]int32 - Pad_cgo_1 [4]byte - Dummy4 [3]*int8 -} - -type Sigacts struct{} - -type ExternProc struct { - P_un [16]byte - P_vmspace uint64 - P_sigacts uint64 - Pad_cgo_0 [3]byte - P_flag int32 - P_stat int8 - P_pid int32 - P_oppid int32 - P_dupfd int32 - Pad_cgo_1 [4]byte - User_stack uint64 - Exit_thread uint64 - P_debugger int32 - Sigwait int32 - P_estcpu uint32 - P_cpticks int32 - P_pctcpu uint32 - Pad_cgo_2 [4]byte - P_wchan uint64 - P_wmesg uint64 - P_swtime uint32 - P_slptime uint32 - P_realtimer Itimerval - P_rtime Timeval - P_uticks uint64 - P_sticks uint64 - P_iticks uint64 - P_traceflag int32 - Pad_cgo_3 [4]byte - P_tracep uint64 - P_siglist int32 - Pad_cgo_4 [4]byte - P_textvp uint64 - P_holdcnt int32 - P_sigmask uint32 - P_sigignore uint32 - P_sigcatch uint32 - P_priority uint8 - P_usrpri uint8 - P_nice int8 - P_comm [17]int8 - Pad_cgo_5 [4]byte - P_pgrp uint64 - P_addr uint64 - P_xstat uint16 - P_acflag uint16 - Pad_cgo_6 [4]byte - P_ru uint64 -} - -type Itimerval struct { - Interval Timeval - Value Timeval -} - -type Vnode struct{} - -type Pgrp struct{} - -type UserStruct struct{} - -type Au_session struct { - Aia_p *AuditinfoAddr - Mask AuMask -} - -type Posix_cred struct { - UID uint32 - Ruid uint32 - Svuid uint32 - Ngroups int16 - Pad_cgo_0 [2]byte - Groups [16]uint32 - Rgid uint32 - Svgid uint32 - Gmuid uint32 - Flags int32 -} - -type Label struct{} - -type AuditinfoAddr struct { - Auid uint32 - Mask AuMask - Termid 
AuTidAddr - Asid int32 - Flags uint64 -} - -type AuMask struct { - Success uint32 - Failure uint32 -} - -type AuTidAddr struct { - Port int32 - Type uint32 - Addr [4]uint32 -} - -type UcredQueue struct { - Next *ucred - Prev **ucred -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go index 2ac413f..858f08e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go @@ -175,6 +175,7 @@ func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { const tiSize = C.sizeof_struct_proc_taskinfo ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) if err != nil { @@ -187,6 +188,7 @@ func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { const tiSize = C.sizeof_struct_proc_taskinfo ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) if err != nil { @@ -204,6 +206,7 @@ func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { const tiSize = C.sizeof_struct_proc_taskinfo ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go index 3d696b7..bc1d357 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go @@ -6,7 +6,6 @@ package process import ( "context" "fmt" - "os/exec" "strconv" "strings" @@ -19,11 +18,7 @@ func (p *Process) CwdWithContext(ctx context.Context) (string, error) { } func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - lsof_bin, err := exec.LookPath("lsof") - if err != nil { - return "", err - } - out, err := invoke.CommandWithContext(ctx, lsof_bin, "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") + out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") if err != nil { return "", fmt.Errorf("bad call to lsof: %s", err) } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go index 444af35..779f812 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go @@ -6,7 +6,6 @@ package process import ( "bytes" "context" - "os/exec" "path/filepath" "strconv" "strings" @@ -115,7 +114,7 @@ func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { if err != nil { return 0, err } - return k.Start.Sec*1000 + k.Start.Usec/1000, nil + return int64(k.Start.Sec)*1000 + int64(k.Start.Usec)/1000, nil } func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { @@ -147,11 +146,7 @@ func (p *Process) StatusWithContext(ctx 
context.Context) ([]string, error) { func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details pid := p.Pid - ps, err := exec.LookPath("ps") - if err != nil { - return false, err - } - out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) if err != nil { return false, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go index 8b7d3fd..d5b5bc3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go @@ -83,7 +83,7 @@ func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { func (p *Process) NameWithContext(ctx context.Context) (string, error) { if p.name == "" { - if err := p.fillNameWithContext(); err != nil { + if err := p.fillNameWithContext(ctx); err != nil { return "", err } } @@ -92,7 +92,7 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { if p.tgid == 0 { - if err := p.fillFromStatusWithContext(); err != nil { + if err := p.fillFromStatusWithContext(ctx); err != nil { return 0, err } } @@ -124,7 +124,7 @@ func (p *Process) CwdWithContext(ctx context.Context) (string, error) { } func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - err := p.fillFromStatusWithContext() + err := p.fillFromStatusWithContext(ctx) if err != nil { return []string{""}, err } @@ -149,7 +149,7 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { } func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - err := p.fillFromStatusWithContext() + err := p.fillFromStatusWithContext(ctx) if err != nil { return []int32{}, err } @@ -157,7 +157,7 @@ func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { } func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - err := p.fillFromStatusWithContext() + err := p.fillFromStatusWithContext(ctx) if err != nil { return []int32{}, err } @@ -165,7 +165,7 @@ func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { } func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - err := p.fillFromStatusWithContext() + err := p.fillFromStatusWithContext(ctx) if err != nil { return []int32{}, err } @@ -211,7 +211,7 @@ func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ( if err != nil { return nil, err } - if err := p.fillFromStatusWithContext(); err != nil { + if err := p.fillFromStatusWithContext(ctx); err != nil { return nil, err } @@ -261,7 +261,7 @@ func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, e } func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { - err := p.fillFromStatusWithContext() + err := p.fillFromStatusWithContext(ctx) if err != nil { return nil, err } @@ -274,7 +274,7 @@ func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { } func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - err := p.fillFromStatusWithContext() + err := p.fillFromStatusWithContext(ctx) if err != nil { return 0, err } @@ -380,10 +380,16 @@ func (p *Process) 
ConnectionsMaxWithContext(ctx context.Context, max int) ([]net func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { pid := p.Pid var ret []MemoryMapsStat + smapsPath := common.HostProc(strconv.Itoa(int(pid)), "smaps") if grouped { ret = make([]MemoryMapsStat, 1) + // If smaps_rollup exists (requires kernel >= 4.15), then we will use it + // for pre-summed memory information for a process. + smapsRollupPath := common.HostProc(strconv.Itoa(int(pid)), "smaps_rollup") + if _, err := os.Stat(smapsRollupPath); !os.IsNotExist(err) { + smapsPath = smapsRollupPath + } } - smapsPath := common.HostProc(strconv.Itoa(int(pid)), "smaps") contents, err := ioutil.ReadFile(smapsPath) if err != nil { return nil, err @@ -682,9 +688,9 @@ func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string if len(cmdline) == 0 { return nil, nil } - if cmdline[len(cmdline)-1] == 0 { - cmdline = cmdline[:len(cmdline)-1] - } + + cmdline = bytes.TrimRight(cmdline, "\x00") + parts := bytes.Split(cmdline, []byte{0}) var strParts []string for _, p := range parts { @@ -783,12 +789,12 @@ func (p *Process) fillFromStatmWithContext() (*MemoryInfoStat, *MemoryInfoExStat } // Get name from /proc/(pid)/comm or /proc/(pid)/status -func (p *Process) fillNameWithContext() error { +func (p *Process) fillNameWithContext(ctx context.Context) error { err := p.fillFromCommWithContext() if err == nil && p.name != "" && len(p.name) < 15 { return nil } - return p.fillFromStatusWithContext() + return p.fillFromStatusWithContext(ctx) } // Get name from /proc/(pid)/comm @@ -805,7 +811,11 @@ func (p *Process) fillFromCommWithContext() error { } // Get various status from /proc/(pid)/status -func (p *Process) fillFromStatusWithContext() error { +func (p *Process) fillFromStatus() error { + return p.fillFromStatusWithContext(context.Background()) +} + +func (p *Process) fillFromStatusWithContext(ctx context.Context) error { pid := p.Pid statPath := common.HostProc(strconv.Itoa(int(pid)), "status") contents, err := ioutil.ReadFile(statPath) @@ -826,7 +836,7 @@ func (p *Process) fillFromStatusWithContext() error { case "Name": p.name = strings.Trim(value, " \t") if len(p.name) >= 15 { - cmdlineSlice, err := p.CmdlineSlice() + cmdlineSlice, err := p.CmdlineSliceWithContext(ctx) if err != nil { return err } @@ -1003,6 +1013,10 @@ func (p *Process) fillFromStatusWithContext() error { return nil } +func (p *Process) fillFromTIDStat(tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + return p.fillFromTIDStatWithContext(context.Background(), tid) +} + func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { pid := p.Pid var statPath string diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go index 77aa804..cbb1a77 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go @@ -9,7 +9,6 @@ import ( "encoding/binary" "fmt" "io" - "os/exec" "path/filepath" "strconv" "strings" @@ -167,11 +166,7 @@ func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details pid := p.Pid - ps, err :=
exec.LookPath("ps") - if err != nil { - return false, err - } - out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) if err != nil { return false, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go new file mode 100644 index 0000000..b94429f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go @@ -0,0 +1,202 @@ +//go:build openbsd && arm +// +build openbsd,arm + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs process/types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x38 + sizeOfKinfoProc = 0x264 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 +} + +type Timeval struct { + Sec int64 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 + Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Acflag uint16 + Comm [24]int8 + Wmesg [8]int8 + Wchan uint64 + Login [32]int8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags int32 + Spare int32 + Svuid uint32 + Svgid uint32 + Emul [8]int8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint32 + End uint32 + Guard uint32 + Fspace uint32 + Fspace_augment uint32 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + 
Max_protection int32 + Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [3]byte +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go index d7bb53e..14ed030 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "os" + "path/filepath" "reflect" "strings" "syscall" @@ -285,8 +286,7 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { } return false, err } - const STILL_ACTIVE = 259 // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess - h, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) + h, err := windows.OpenProcess(windows.SYNCHRONIZE, false, uint32(pid)) if err == windows.ERROR_ACCESS_DENIED { return true, nil } @@ -296,10 +296,9 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { if err != nil { return false, err } - defer syscall.CloseHandle(syscall.Handle(h)) - var exitCode uint32 - err = windows.GetExitCodeProcess(h, &exitCode) - return exitCode == STILL_ACTIVE, err + defer windows.CloseHandle(h) + event, err := windows.WaitForSingleObject(h, 0) + return event == uint32(windows.WAIT_TIMEOUT), err } func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { @@ -321,18 +320,19 @@ func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { } func (p *Process) NameWithContext(ctx context.Context) (string, error) { - ppid, _, name, err := getFromSnapProcess(p.Pid) - if err != nil { - return "", fmt.Errorf("could not get Name: %s", err) + if p.Pid == 0 { + return "System Idle Process", nil + } + if p.Pid == 4 { + return "System", nil } - // if no errors and not cached already, cache ppid - p.parent = ppid - if 0 == p.getPpid() { - p.setPpid(ppid) + exe, err := p.ExeWithContext(ctx) + if err != nil { + return "", fmt.Errorf("could not get Name: %s", err) } - return name, nil + return filepath.Base(exe), nil } func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { @@ -410,7 +410,7 @@ func (p *Process) CwdWithContext(_ context.Context) (string, error) { } if userProcParams.CurrentDirectoryPathNameLength > 0 { cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CurrentDirectoryPathAddress), uint(userProcParams.CurrentDirectoryPathNameLength)) - if len(cwd) != int(userProcParams.CurrentDirectoryPathAddress) { + if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) { return "", errors.New("cannot read current working directory") } @@ -697,8 +697,8 @@ func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, er // release the new handle defer windows.CloseHandle(windows.Handle(file)) - fileType, _ := windows.GetFileType(windows.Handle(file)) - if fileType != windows.FILE_TYPE_DISK { + fileType, err := windows.GetFileType(windows.Handle(file)) + if err != nil || fileType != windows.FILE_TYPE_DISK { continue } @@ -720,8 +720,8 @@ func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, er case <-time.NewTimer(100 * time.Millisecond).C: continue case <-ch: - fileInfo, _ := os.Stat(fileName) - if fileInfo.IsDir() { + fileInfo, err := os.Stat(fileName) + if err != nil || fileInfo.IsDir() { continue } @@ -989,15 +989,9 @@ func is32BitProcess(h windows.Handle) bool { var procIs32Bits bool switch processorArchitecture { - case 
PROCESSOR_ARCHITECTURE_INTEL: - fallthrough - case PROCESSOR_ARCHITECTURE_ARM: + case PROCESSOR_ARCHITECTURE_INTEL, PROCESSOR_ARCHITECTURE_ARM: procIs32Bits = true - case PROCESSOR_ARCHITECTURE_ARM64: - fallthrough - case PROCESSOR_ARCHITECTURE_IA64: - fallthrough - case PROCESSOR_ARCHITECTURE_AMD64: + case PROCESSOR_ARCHITECTURE_ARM64, PROCESSOR_ARCHITECTURE_IA64, PROCESSOR_ARCHITECTURE_AMD64: var wow64 uint ret, _, _ := common.ProcNtQueryInformationProcess.Call( diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d2..95d8e59 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -1,8 +1,10 @@ package assert import ( + "bytes" "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +32,9 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) + bytesType = reflect.TypeOf([]byte{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can be compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } + case reflect.Slice: + { + // We only care about the []byte type. + if !canConvert(obj1Value, bytesType) { + break + } + + // []byte can be compared! + bytesObj1, ok := obj1.([]byte) + if !ok { + bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte) + + } + bytesObj2, ok := obj2.([]byte) + if !ok { + bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) + } + + return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + } } return compareEqual, false @@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
} // Less asserts that the first element is less than the second @@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 0000000..da86790 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatibility +// reasons. 
+func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 0000000..1701af2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go do not have the reflect.Value.CanConvert +// method. +func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd122..7880b8f 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { @@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) +} + // YAMLEqf asserts that two YAML strings are equivalent. func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6..339515b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring.
+// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { @@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b471..7594487 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
} // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index bcac440..fa1245b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -144,7 +145,8 @@ func CallerInfo() []string { if len(parts) > 1 { dir := parts[len(parts)-2] if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + path, _ := filepath.Abs(file) + callers = append(callers, fmt.Sprintf("%s:%d", path, line)) } } @@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool { switch objValue.Kind() { // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value + // array types are empty when they match their zero-initialized state default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. 
-func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + } + } + + return true + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) 
} - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, return true } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if end.Before(start) { + return Fail(t, "Start should be before end", msgAndArgs...) + } + + if actual.Before(start) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) + } else if actual.After(end) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...) 
+ } + + return true +} + func toFloat(x interface{}) (float64, bool) { var xf float64 xok := true @@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. 
func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 51820df..880853f 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { @@ -1834,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRange(t, actual, start, end, msgAndArgs...) { + return + } + t.FailNow() +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRangef(t, actual, start, end, msg, args...) 
{ + return + } + t.FailNow() +} + // YAMLEq asserts that two YAML strings are equivalent. func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d..960bf6f 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { @@ -1438,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml index 37dd83a..6be2c35 100644 --- a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -1,22 +1,23 @@ env: CIRRUS_CLONE_DEPTH: 1 + GO_VERSION: go1.19.1 freebsd_12_task: freebsd_instance: - image_family: freebsd-12-2 + image_family: freebsd-12-3 install_script: | - pkg install -y git go - GOBIN=$PWD/bin go get golang.org/dl/go1.17 - bin/go1.17 download - build_script: bin/go1.17 build -v ./... - test_script: bin/go1.17 test -race ./... 
+ pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... freebsd_13_task: freebsd_instance: image_family: freebsd-13-0 install_script: | - pkg install -y git go - GOBIN=$PWD/bin go get golang.org/dl/go1.17 - bin/go1.17 download - build_script: bin/go1.17 build -v ./... - test_script: bin/go1.17 test -race ./... + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... diff --git a/vendor/github.com/tklauser/go-sysconf/LICENSE b/vendor/github.com/tklauser/go-sysconf/LICENSE index cf198de..73c6b89 100644 --- a/vendor/github.com/tklauser/go-sysconf/LICENSE +++ b/vendor/github.com/tklauser/go-sysconf/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2018-2021, Tobias Klauser +Copyright (c) 2018-2022, Tobias Klauser All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/tklauser/go-sysconf/README.md b/vendor/github.com/tklauser/go-sysconf/README.md index 31bd1cb..b83d5ab 100644 --- a/vendor/github.com/tklauser/go-sysconf/README.md +++ b/vendor/github.com/tklauser/go-sysconf/README.md @@ -2,7 +2,6 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/tklauser/go-sysconf.svg)](https://pkg.go.dev/github.com/tklauser/go-sysconf) [![GitHub Action Status](https://github.com/tklauser/go-sysconf/workflows/Tests/badge.svg)](https://github.com/tklauser/go-sysconf/actions?query=workflow%3ATests) -[![Go Report Card](https://goreportcard.com/badge/github.com/tklauser/go-sysconf)](https://goreportcard.com/report/github.com/tklauser/go-sysconf) `sysconf` for Go, without using cgo or external binaries (e.g. getconf). diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go index 7b7d020..325d4a6 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go @@ -15,8 +15,14 @@ const ( _LOGIN_NAME_MAX = _MAXLOGNAME + 1 _SYMLOOP_MAX = _MAXSYMLINKS - _POSIX2_C_DEV = -1 - _POSIX2_UPE = -1 + _POSIX2_C_BIND = 1 + _POSIX2_C_DEV = -1 + _POSIX2_CHAR_TERM = -1 + _POSIX2_FORT_DEV = -1 + _POSIX2_FORT_RUN = -1 + _POSIX2_LOCALEDEF = -1 + _POSIX2_SW_DEV = -1 + _POSIX2_UPE = -1 ) var ( @@ -26,14 +32,7 @@ var ( func sysconfPOSIX(name int) (int64, error) { // NetBSD does not define all _POSIX_* values used in sysconf_posix.go - // Handle the supported ones here. - switch name { - case SC_SHELL: - return _POSIX_SHELL, nil - case SC_VERSION: - return _POSIX_VERSION, nil - } - + // The supported ones are handled in sysconf below. return -1, errInvalid } @@ -43,6 +42,8 @@ func sysconf(name int) (int64, error) { // Duplicate the relevant values here. 
switch name { + + // 1003.1 case SC_ARG_MAX: return sysctl32("kern.argmax"), nil case SC_CHILD_MAX: @@ -53,11 +54,6 @@ func sysconf(name int) (int64, error) { } } return -1, nil - case SC_STREAM_MAX: - // sysctl("user.stream_max") - return _FOPEN_MAX, nil - case SC_TTY_NAME_MAX: - return pathconf(_PATH_DEV, _PC_NAME_MAX), nil case SC_CLK_TCK: clktckOnce.Do(func() { clktck = -1 @@ -76,15 +72,30 @@ func sysconf(name int) (int64, error) { return int64(rlim.Cur), nil } return -1, nil + case SC_STREAM_MAX: + // sysctl("user.stream_max") + return _FOPEN_MAX, nil case SC_TZNAME_MAX: // sysctl("user.tzname_max") return _NAME_MAX, nil + case SC_SAVED_IDS: + return yesno(sysctl32("kern.saved_ids")), nil + case SC_VERSION: + return sysctl32("kern.posix1version"), nil // 1003.1b case SC_FSYNC: return sysctl32("kern.fsync"), nil + case SC_SYNCHRONIZED_IO: + return sysctl32("kern.synchronized_io"), nil case SC_MAPPED_FILES: return sysctl32("kern.mapped_files"), nil + case SC_MEMLOCK: + return sysctl32("kern.memlock"), nil + case SC_MEMLOCK_RANGE: + return sysctl32("kern.memlock_range"), nil + case SC_MEMORY_PROTECTION: + return sysctl32("kern.memory_protection"), nil case SC_MONOTONIC_CLOCK: return sysctl32("kern.monotonic_clock"), nil case SC_SEMAPHORES: @@ -100,19 +111,46 @@ func sysconf(name int) (int64, error) { // 1003.1j case SC_BARRIERS: - return sysctl32("kern.posix_barriers"), nil + return yesno(sysctl32("kern.posix_barriers")), nil + case SC_SPIN_LOCKS: + return yesno(sysctl32("kern.posix_spin_locks")), nil + case SC_READER_WRITER_LOCKS: + return yesno(sysctl32("kern.posix_reader_writer_locks")), nil // 1003.2 case SC_2_VERSION: - // sysctl("user.posix2_version") + // sysctl user.posix2_version return _POSIX2_VERSION, nil + case SC_2_C_BIND: + // sysctl user.posix2_c_bind + return _POSIX2_C_BIND, nil + case SC_2_C_DEV: + // sysctl user.posix2_c_dev + return _POSIX2_C_DEV, nil + case SC_2_CHAR_TERM: + // sysctl user.posix2_char_term + return _POSIX2_CHAR_TERM, nil + case SC_2_FORT_DEV: + // sysctl user.posix2_fort_dev + return _POSIX2_FORT_DEV, nil + case SC_2_FORT_RUN: + // sysctl user.posix2_fort_run + return _POSIX2_FORT_RUN, nil + case SC_2_LOCALEDEF: + // sysctl user.posix2_localedef + return _POSIX2_LOCALEDEF, nil + case SC_2_SW_DEV: + // sysctl user.posix2_sw_dev + return _POSIX2_SW_DEV, nil case SC_2_UPE: - // sysctl("user.posix2_upe") + // sysctl user.posix2_upe return _POSIX2_UPE, nil // XPG 4.2 case SC_IOV_MAX: return sysctl32("kern.iov_max"), nil + case SC_XOPEN_SHM: + return yesno(sysctl32("kern.ipc.sysvshm")), nil // 1003.1-2001, XSI Option Group case SC_AIO_LISTIO_MAX: @@ -120,15 +158,39 @@ func sysconf(name int) (int64, error) { case SC_AIO_MAX: return sysctl32("kern.aio_max"), nil case SC_ASYNCHRONOUS_IO: - return sysctl32("kern.posix_aio"), nil + return yesno(sysctl32("kern.posix_aio")), nil + case SC_MESSAGE_PASSING: + return yesno(sysctl32("kern.posix_msg")), nil case SC_MQ_OPEN_MAX: return sysctl32("kern.mqueue.mq_open_max"), nil case SC_MQ_PRIO_MAX: return sysctl32("kern.mqueue.mq_prio_max"), nil + case SC_PRIORITY_SCHEDULING: + return yesno(sysctl32("kern.posix_sched")), nil case SC_ATEXIT_MAX: // sysctl("user.atexit_max") return -1, nil // TODO + // 1003.1-2001, TSF + case SC_GETGR_R_SIZE_MAX: + return _GETGR_R_SIZE_MAX, nil + case SC_GETPW_R_SIZE_MAX: + return _GETPW_R_SIZE_MAX, nil + + // Unsorted + case SC_HOST_NAME_MAX: + return _MAXHOSTNAMELEN, nil + case SC_PASS_MAX: + return _PASSWORD_LEN, nil + case SC_REGEXP: + return _POSIX_REGEXP, nil + case 
SC_SHARED_MEMORY_OBJECTS: + return _POSIX_SHARED_MEMORY_OBJECTS, nil + case SC_SHELL: + return _POSIX_SHELL, nil + case SC_SPAWN: + return _POSIX_SPAWN, nil + // Extensions case SC_NPROCESSORS_CONF: return sysctl32("hw.ncpu"), nil @@ -140,6 +202,12 @@ func sysconf(name int) (int64, error) { return sysctl64("hw.physmem64") / int64(unix.Getpagesize()), nil // Native + case SC_SCHED_RT_TS: + return sysctl32("kern.sched.rtts"), nil + case SC_SCHED_PRI_MIN: + return sysctl32("kern.sched.pri_min"), nil + case SC_SCHED_PRI_MAX: + return sysctl32("kern.sched.pri_max"), nil case SC_THREAD_DESTRUCTOR_ITERATIONS: return _POSIX_THREAD_DESTRUCTOR_ITERATIONS, nil case SC_THREAD_KEYS_MAX: @@ -148,6 +216,34 @@ func sysconf(name int) (int64, error) { return int64(unix.Getpagesize()), nil case SC_THREAD_THREADS_MAX: return sysctl32("kern.maxproc"), nil + case SC_THREAD_ATTR_STACKADDR: + return _POSIX_THREAD_ATTR_STACKADDR, nil + case SC_THREAD_ATTR_STACKSIZE: + return _POSIX_THREAD_ATTR_STACKSIZE, nil + case SC_THREAD_SAFE_FUNCTIONS: + return _POSIX_THREAD_SAFE_FUNCTIONS, nil + case SC_THREAD_PRIO_PROTECT: + return _POSIX_THREAD_PRIO_PROTECT, nil + case SC_THREAD_PRIORITY_SCHEDULING, + SC_THREAD_PRIO_INHERIT, + SC_THREAD_PROCESS_SHARED: + return -1, nil + case SC_TTY_NAME_MAX: + return pathconf(_PATH_DEV, _PC_NAME_MAX), nil + case SC_TIMER_MAX: + return _POSIX_TIMER_MAX, nil + case SC_SEM_NSEMS_MAX: + return _LONG_MAX, nil + case SC_CPUTIME: + return _POSIX_CPUTIME, nil + case SC_THREAD_CPUTIME: + return _POSIX_THREAD_CPUTIME, nil + case SC_DELAYTIMER_MAX: + return _POSIX_DELAYTIMER_MAX, nil + case SC_SIGQUEUE_MAX: + return _POSIX_SIGQUEUE_MAX, nil + case SC_REALTIME_SIGNALS: + return 200112, nil } return sysconfGeneric(name) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go index 6b81bad..d2aaf07 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs sysconf_defs_netbsd.go //go:build netbsd @@ -7,61 +7,110 @@ package sysconf const ( - SC_AIO_LISTIO_MAX = 0x33 - SC_AIO_MAX = 0x34 SC_ARG_MAX = 0x1 - SC_ATEXIT_MAX = 0x28 + SC_CHILD_MAX = 0x2 + SC_NGROUPS_MAX = 0x4 + SC_OPEN_MAX = 0x5 + SC_JOB_CONTROL = 0x6 + SC_SAVED_IDS = 0x7 + SC_VERSION = 0x8 SC_BC_BASE_MAX = 0x9 SC_BC_DIM_MAX = 0xa SC_BC_SCALE_MAX = 0xb SC_BC_STRING_MAX = 0xc - SC_CHILD_MAX = 0x2 - SC_CLK_TCK = 0x27 SC_COLL_WEIGHTS_MAX = 0xd SC_EXPR_NEST_MAX = 0xe - SC_HOST_NAME_MAX = 0x45 - SC_IOV_MAX = 0x20 SC_LINE_MAX = 0xf + SC_RE_DUP_MAX = 0x10 + SC_2_VERSION = 0x11 + SC_2_C_BIND = 0x12 + SC_2_C_DEV = 0x13 + SC_2_CHAR_TERM = 0x14 + SC_2_FORT_DEV = 0x15 + SC_2_FORT_RUN = 0x16 + SC_2_LOCALEDEF = 0x17 + SC_2_SW_DEV = 0x18 + SC_2_UPE = 0x19 + SC_STREAM_MAX = 0x1a + SC_TZNAME_MAX = 0x1b + SC_PAGESIZE = 0x1c + SC_PAGE_SIZE = 0x1c + SC_FSYNC = 0x1d + SC_XOPEN_SHM = 0x1e + SC_SYNCHRONIZED_IO = 0x1f + SC_IOV_MAX = 0x20 + SC_MAPPED_FILES = 0x21 + SC_MEMLOCK = 0x22 + SC_MEMLOCK_RANGE = 0x23 + SC_MEMORY_PROTECTION = 0x24 SC_LOGIN_NAME_MAX = 0x25 + SC_MONOTONIC_CLOCK = 0x26 + SC_CLK_TCK = 0x27 + SC_ATEXIT_MAX = 0x28 + SC_THREADS = 0x29 + SC_SEMAPHORES = 0x2a + SC_BARRIERS = 0x2b + SC_TIMERS = 0x2c + SC_SPIN_LOCKS = 0x2d + SC_READER_WRITER_LOCKS = 0x2e + SC_GETGR_R_SIZE_MAX = 0x2f + SC_GETPW_R_SIZE_MAX = 0x30 + SC_CLOCK_SELECTION = 0x31 + SC_ASYNCHRONOUS_IO = 0x32 + SC_AIO_LISTIO_MAX = 0x33 + SC_AIO_MAX = 0x34 + SC_MESSAGE_PASSING = 0x35 SC_MQ_OPEN_MAX = 0x36 SC_MQ_PRIO_MAX = 0x37 - SC_NGROUPS_MAX = 0x4 - SC_OPEN_MAX = 0x5 - SC_PAGE_SIZE = 0x1c - SC_PAGESIZE = 0x1c + SC_PRIORITY_SCHEDULING = 0x38 SC_THREAD_DESTRUCTOR_ITERATIONS = 0x39 SC_THREAD_KEYS_MAX = 0x3a SC_THREAD_STACK_MIN = 0x3b SC_THREAD_THREADS_MAX = 0x3c - SC_RE_DUP_MAX = 0x10 - SC_STREAM_MAX = 0x1a - SC_SYMLOOP_MAX = 0x49 + SC_THREAD_ATTR_STACKADDR = 0x3d + SC_THREAD_ATTR_STACKSIZE = 0x3e + SC_THREAD_PRIORITY_SCHEDULING = 0x3f + SC_THREAD_PRIO_INHERIT = 0x40 + SC_THREAD_PRIO_PROTECT = 0x41 + SC_THREAD_PROCESS_SHARED = 0x42 + SC_THREAD_SAFE_FUNCTIONS = 0x43 SC_TTY_NAME_MAX = 0x44 - SC_TZNAME_MAX = 0x1b + SC_HOST_NAME_MAX = 0x45 + SC_PASS_MAX = 0x46 + SC_REGEXP = 0x47 + SC_SHELL = 0x48 + SC_SYMLOOP_MAX = 0x49 + + SC_V6_ILP32_OFF32 = 0x4a + SC_V6_ILP32_OFFBIG = 0x4b + SC_V6_LP64_OFF64 = 0x4c + SC_V6_LPBIG_OFFBIG = 0x4d + SC_2_PBS = 0x50 + SC_2_PBS_ACCOUNTING = 0x51 + SC_2_PBS_CHECKPOINT = 0x52 + SC_2_PBS_LOCATE = 0x53 + SC_2_PBS_MESSAGE = 0x54 + SC_2_PBS_TRACK = 0x55 + + SC_SPAWN = 0x56 + SC_SHARED_MEMORY_OBJECTS = 0x57 + + SC_TIMER_MAX = 0x58 + SC_SEM_NSEMS_MAX = 0x59 + SC_CPUTIME = 0x5a + SC_THREAD_CPUTIME = 0x5b + SC_DELAYTIMER_MAX = 0x5c + SC_SIGQUEUE_MAX = 0x5d + SC_REALTIME_SIGNALS = 0x5e + + SC_PHYS_PAGES = 0x79 - SC_ASYNCHRONOUS_IO = 0x32 - SC_BARRIERS = 0x2b - SC_FSYNC = 0x1d - SC_JOB_CONTROL = 0x6 - SC_MAPPED_FILES = 0x21 - SC_SEMAPHORES = 0x2a - SC_SHELL = 0x48 - SC_THREADS = 0x29 - SC_TIMERS = 0x2c - SC_VERSION = 0x8 - - SC_2_VERSION = 0x11 - SC_2_C_DEV = 0x13 - SC_2_FORT_DEV = 0x15 - SC_2_FORT_RUN = 0x16 - SC_2_LOCALEDEF = 0x17 - SC_2_SW_DEV = 0x18 - SC_2_UPE = 0x19 - - SC_PHYS_PAGES = 0x79 - SC_MONOTONIC_CLOCK = 0x26 SC_NPROCESSORS_CONF = 0x3e9 SC_NPROCESSORS_ONLN = 0x3ea + + SC_SCHED_RT_TS = 0x7d1 + SC_SCHED_PRI_MIN = 0x7d2 + SC_SCHED_PRI_MAX = 0x7d3 ) const ( @@ -71,9 +120,22 @@ const ( _POSIX_ARG_MAX = 0x1000 _POSIX_CHILD_MAX = 0x19 + _POSIX_CPUTIME = 0x30db0 + _POSIX_DELAYTIMER_MAX = 0x20 + _POSIX_PRIORITY_SCHEDULING = 0x30db0 + _POSIX_REGEXP = 0x1 + _POSIX_SHARED_MEMORY_OBJECTS = 0x0 _POSIX_SHELL = 
0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x30db0 + _POSIX_THREAD_ATTR_STACKSIZE = 0x30db0 + _POSIX_THREAD_CPUTIME = 0x30db0 _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 - _POSIX_THREAD_KEYS_MAX = 0x100 + _POSIX_THREAD_KEYS_MAX = 0x80 + _POSIX_THREAD_PRIO_PROTECT = 0x30db0 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x30db0 + _POSIX_TIMER_MAX = 0x20 _POSIX_VERSION = 0x30db0 _POSIX2_VERSION = 0x30db0 @@ -90,8 +152,13 @@ const ( _EXPR_NEST_MAX = 0x20 _LINE_MAX = 0x800 + _GETGR_R_SIZE_MAX = 0x400 + _GETPW_R_SIZE_MAX = 0x400 + _PATH_DEV = "/dev/" _PATH_ZONEINFO = "/usr/share/zoneinfo" + + _PASSWORD_LEN = 0x80 ) const _PC_NAME_MAX = 0x4 diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go index bde7139..badc66c 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs sysconf_defs_openbsd.go //go:build openbsd diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go new file mode 100644 index 0000000..b7cff76 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go @@ -0,0 +1,12 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_freebsd.go + +//go:build freebsd && riscv64 +// +build freebsd,riscv64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff + _SHRT_MAX = 0x7fff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go new file mode 100644 index 0000000..95a71f4 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && loong64 +// +build linux,loong64 + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go new file mode 100644 index 0000000..3cd64dd --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_netbsd.go + +//go:build netbsd && 386 +// +build netbsd,386 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go new file mode 100644 index 0000000..02fc1d0 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_netbsd.go + +//go:build netbsd && amd64 +// +build netbsd,amd64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go new file mode 100644 index 0000000..16f9b6e --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_netbsd.go + +//go:build netbsd && arm +// +build netbsd,arm + +package sysconf + +const ( + _LONG_MAX = 0x7fffffff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go new file mode 100644 index 0000000..e530339 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_netbsd.go + +//go:build netbsd && arm64 +// +build netbsd,arm64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff +) diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml index 11a39e2..53c0110 100644 --- a/vendor/github.com/tklauser/numcpus/.cirrus.yml +++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml @@ -1,12 +1,13 @@ env: CIRRUS_CLONE_DEPTH: 1 + GO_VERSION: go1.19.1 freebsd_12_task: freebsd_instance: image_family: freebsd-12-3 install_script: | - pkg install -y git go - GOBIN=$PWD/bin go install golang.org/dl/go1.17.6@latest - bin/go1.17.6 download - build_script: bin/go1.17.6 build -v ./... - test_script: bin/go1.17.6 test -race ./... + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -buildvcs=false -v ./... + test_script: bin/${GO_VERSION} test -buildvcs=false -race ./... diff --git a/vendor/github.com/xdg-go/scram/CHANGELOG.md b/vendor/github.com/xdg-go/scram/CHANGELOG.md index 5371fd4..b833be5 100644 --- a/vendor/github.com/xdg-go/scram/CHANGELOG.md +++ b/vendor/github.com/xdg-go/scram/CHANGELOG.md @@ -1,5 +1,13 @@ # CHANGELOG +## v1.1.2 - 2022-12-07 + +- Bump stringprep dependency to v1.0.4 for upstream CVE fix. + +## v1.1.1 - 2022-03-03 + +- Bump stringprep dependency to v1.0.3 for upstream CVE fix. + ## v1.1.0 - 2022-01-16 - Add SHA-512 hash generator function for convenience. 
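The regenerated go-sysconf tables above only change machine-generated constants; callers keep using the package's exported `Sysconf` function, which resolves a `SC_*` variable to its value on the running OS and architecture. A minimal sketch of that consumer side (the import path matches the vendored package, and both variables queried below are among those defined in the tables above):

```go
package main

import (
	"fmt"

	"github.com/tklauser/go-sysconf"
)

func main() {
	// Sysconf looks the variable up in the per-OS/per-arch generated tables.
	pageSize, err := sysconf.Sysconf(sysconf.SC_PAGESIZE)
	if err != nil {
		panic(err)
	}
	nproc, err := sysconf.Sysconf(sysconf.SC_NPROCESSORS_ONLN)
	if err != nil {
		panic(err)
	}
	fmt.Printf("page size: %d bytes, online CPUs: %d\n", pageSize, nproc)
}
```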
diff --git a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md index 2849637..04b9753 100644 --- a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md +++ b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md @@ -1,5 +1,19 @@ # CHANGELOG + +## [v1.0.4] - 2022-12-07 + +### Maintenance + +- Bump golang.org/x/text to v0.3.8 due to CVE-2022-32149 + + +## [v1.0.3] - 2022-03-01 + +### Maintenance + +- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561 + ## [v1.0.2] - 2021-03-27 diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go index 95ffc10..a0d8185 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -27,7 +27,7 @@ type Zeroer interface { // // Example usage: // -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} type D = primitive.D // E represents a BSON element for a D. It is usually used inside a D. @@ -39,12 +39,12 @@ type E = primitive.E // // Example usage: // -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} type M = primitive.M // An A is an ordered representation of a BSON array. // // Example usage: // -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go index 2c861b5..098ed69 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -13,6 +13,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" ) var ( @@ -43,7 +44,7 @@ type Unmarshaler interface { } // ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representaiton of themselves. The BSON bytes and type can be +// BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. type ValueUnmarshaler interface { @@ -118,11 +119,32 @@ type EncodeContext struct { type DecodeContext struct { *Registry Truncate bool + // Ancestor is the type of a containing document. This is mainly used to determine what type // should be used when decoding an embedded document into an empty interface. For example, if // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface // will be decoded into a bson.M. + // + // Deprecated: Use DefaultDocumentM or DefaultDocumentD instead. Ancestor reflect.Type + + // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the + // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is + // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an + // error. DocumentType overrides the Ancestor field. + defaultDocumentType reflect.Type +} + +// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". 
+func (dc *DecodeContext) DefaultDocumentM() { + dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) +} + +// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (dc *DecodeContext) DefaultDocumentD() { + dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) } // ValueCodec is the interface that groups the methods to encode and decode diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 32fd142..e95cab5 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -53,7 +53,7 @@ type DefaultValueDecoders struct{} // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with // the provided RegistryBuilder. // -// There is no support for decoding map[string]interface{} becuase there is no decoder for +// There is no support for decoding map[string]interface{} because there is no decoder for // interface{}, so users must either register this decoder themselves or use the // EmptyInterfaceDecoder available in the bson package. func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { @@ -1463,7 +1463,7 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr if !val.CanAddr() { return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - val = val.Addr() // If they type doesn't implement the interface, a pointer to it must. + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) @@ -1492,16 +1492,28 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson val.Set(reflect.New(val.Type().Elem())) } + _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + // If the target Go value is a pointer and the BSON field value is empty, set the value to the + // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to + // change the pointer value from within the function (only the value at the pointer address), + // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON + // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches + // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and + // the JSON field value is "null". + if val.Kind() == reflect.Ptr && len(src) == 0 { + val.Set(reflect.Zero(val.Type())) + return nil + } + if !val.Type().Implements(tUnmarshaler) { if !val.CanAddr() { return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } - val = val.Addr() // If they type doesn't implement the interface, a pointer to it must. - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. 
} fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index c1e20f9..5f903eb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -1,3 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + // Package bsoncodec provides a system for encoding values to BSON representations and decoding // values from BSON representations. This package considers both binary BSON and ExtendedJSON as // BSON representations. The types in this package enable a flexible system for handling this @@ -11,7 +17,7 @@ // 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for // retrieving them. // -// ValueEncoders and ValueDecoders +// # ValueEncoders and ValueDecoders // // The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. // The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the @@ -25,7 +31,7 @@ // allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext // instance is provided and serves similar functionality to the EncodeContext. // -// Registry and RegistryBuilder +// # Registry and RegistryBuilder // // A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type // documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a @@ -47,15 +53,15 @@ // values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would // change the behavior so these values decode as Go int instances instead: // -// intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// intType := reflect.TypeOf(int(0)) +// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) // // 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder // methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the // registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. // These methods should be used to change the behavior for all values for a specific kind. // -// Registry Lookup Procedure +// # Registry Lookup Procedure // // When looking up an encoder in a Registry, the precedence rules are as follows: // @@ -73,7 +79,7 @@ // rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is // found. 
// -// DefaultValueEncoders and DefaultValueDecoders +// # DefaultValueEncoders and DefaultValueDecoders // // The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and // ValueDecoders for handling a wide range of Go types, including all of the types within the diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index a15636d..eda417c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -57,11 +57,18 @@ func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWrit func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument && dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil + if isDocument { + if dc.defaultDocumentType != nil { + // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return + // that type. + return dc.defaultDocumentType, nil + } + if dc.Ancestor != nil { + // Using ancestor information rather than looking up the type map entry forces consistent decoding. + // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry + // has been registered. + return dc.Ancestor, nil + } } rtype, err := dc.LookupTypeMapEntry(valueType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index 1f7acbc..e1fbef9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "encoding" "fmt" "reflect" "strconv" @@ -230,6 +231,19 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { } return "", err } + // keys implement encoding.TextMarshaler are marshaled. + if km, ok := val.Interface().(encoding.TextMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + + buf, err := km.MarshalText() + if err != nil { + return "", err + } + + return string(buf), nil + } switch val.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -241,6 +255,7 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { } var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { keyVal := reflect.ValueOf(key) @@ -252,6 +267,12 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, v := keyVal.Interface().(KeyUnmarshaler) err = v.UnmarshalKey(key) keyVal = keyVal.Elem() + // Try to decode encoding.TextUnmarshalers. 
+ case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(encoding.TextUnmarshaler) + err = v.UnmarshalText([]byte(key)) + keyVal = keyVal.Elem() // Otherwise, go to type specific behavior default: switch keyType.Kind() { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 02b9341..8064402 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -254,6 +254,7 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe // By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON // documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents // to decode to bson.Raw, use the following code: +// // rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { rb.typeMap[bt] = rt @@ -298,7 +299,7 @@ func (rb *RegistryBuilder) Build() *Registry { return registry } -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precendence works as follows: +// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: // // 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using // RegisterTypeEncoder for the interface will be selected. @@ -376,7 +377,7 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value return nil, false } -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precendence works as follows: +// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: // // 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using // RegisterTypeDecoder for the interface will be selected. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go index 6f406c1..62708c5 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -34,21 +34,21 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT // // The properties are defined below: // -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. +// OmitEmpty Only include the field if it's not set to the zero value for the type or to +// empty slices or maps. // -// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's -// feasible while preserving the numeric value. +// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's +// feasible while preserving the numeric value. // -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. +// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within +// a float32. 
// -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. +// Inline Inline the field, which must be a struct or a map, causing all of its fields +// or keys to be processed as if they were part of the outer struct. For maps, +// keys must not conflict with the bson keys of other struct fields. // -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. +// Skip This struct field should be skipped. This is usually denoted by parsing a "-" +// for the name. // // TODO(skriptble): Add tags for undefined as nil and for null as nil. type StructTags struct { @@ -67,20 +67,20 @@ type StructTags struct { // If there is no name in the struct tag fields, the struct field name is lowercased. // The tag formats accepted are: // -// "[][,[,]]" +// "[][,[,]]" // -// `(...) bson:"[][,[,]]" (...)` +// `(...) bson:"[][,[,]]" (...)` // // An example: // -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } // // A struct tag either consisting entirely of '-' or with a bson key with a // value consisting entirely of '-' will return a StructTags with Skip true and diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go new file mode 100644 index 0000000..c40973c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go @@ -0,0 +1,8 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonoptions defines the optional configurations for the BSON codecs. +package bsonoptions diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 8a690e3..54c76bf 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -423,7 +423,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { if ejp.canonical { return nil, invalidJSONErrorForType("object", t) } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t) + return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index 5e14737..ef5d837 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -86,12 +86,11 @@ type valueReader struct { // NewBSONDocumentReader returns a ValueReader using b for the underlying BSON // representation. Parameter b must be a BSON Document. 
-// -// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes -// a []byte while the writer takes an io.Writer. We should have two versions of each, one that takes -// a []byte and one that takes an io.Reader or io.Writer. The []byte version will need to return a -// thing that can return the finished []byte since it might be reallocated when appended to. func NewBSONDocumentReader(b []byte) ValueReader { + // TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes a []byte while the + // TODO writer takes an io.Writer. We should have two versions of each, one that takes a []byte and one that takes an + // TODO io.Reader or io.Writer. The []byte version will need to return a thing that can return the finished []byte since + // TODO it might be reallocated when appended to. return newValueReader(b) } @@ -384,9 +383,13 @@ func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) { if err != nil { return nil, 0, err } + // Make a copy of the returned byte slice because it's just a subslice from the valueReader's + // buffer and is not safe to return in the unmarshaled value. + cp := make([]byte, len(b)) + copy(cp, b) vr.pop() - return b, btype, nil + return cp, btype, nil } func (vr *valueReader) ReadBoolean() (bool, error) { @@ -737,6 +740,9 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return vr, nil } +// readBytes reads length bytes from the valueReader starting at the current offset. Note that the +// returned byte slice is a subslice from the valueReader buffer and must be converted or copied +// before returning in an unmarshaled value. func (vr *valueReader) readBytes(length int32) ([]byte, error) { if length < 0 { return nil, fmt.Errorf("invalid length: %d", length) @@ -748,6 +754,7 @@ func (vr *valueReader) readBytes(length int32) ([]byte, error) { start := vr.offset vr.offset += int64(length) + return vr.d[start : start+int64(length)], nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index a39c4ea..f95a08a 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -529,7 +529,7 @@ func (vw *valueWriter) WriteDocumentEnd() error { vw.pop() if vw.stack[vw.frame].mode == mCodeWithScope { - // We ignore the error here because of the gaurantee of writeLength. + // We ignore the error here because of the guarantee of writeLength. // See the docs for writeLength for more info. _ = vw.writeLength() vw.pop() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go index 7f6b769..6e189fa 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go @@ -33,6 +33,11 @@ var decPool = sync.Pool{ type Decoder struct { dc bsoncodec.DecodeContext vr bsonrw.ValueReader + + // We persist defaultDocumentM and defaultDocumentD on the Decoder to prevent overwriting from + // (*Decoder).SetContext. + defaultDocumentM bool + defaultDocumentD bool } // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr. 
@@ -95,6 +100,12 @@ func (d *Decoder) Decode(val interface{}) error { if err != nil { return err } + if d.defaultDocumentM { + d.dc.DefaultDocumentM() + } + if d.defaultDocumentD { + d.dc.DefaultDocumentD() + } return decoder.DecodeValue(d.dc, d.vr, rval) } @@ -116,3 +127,15 @@ func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error { d.dc = dc return nil } + +// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (d *Decoder) DefaultDocumentM() { + d.defaultDocumentM = true +} + +// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (d *Decoder) DefaultDocumentD() { + d.defaultDocumentD = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 094be93..0134006 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -9,21 +9,22 @@ // The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description // of the codec system and examples of registering custom codecs, see the bsoncodec package. // -// Raw BSON +// # Raw BSON // // The Raw family of types is used to validate and retrieve elements from a slice of bytes. This // type is most useful when you want do lookups on BSON bytes without unmarshaling it into another // type. // // Example: -// var raw bson.Raw = ... // bytes from somewhere -// err := raw.Validate() -// if err != nil { return err } -// val := raw.Lookup("foo") -// i32, ok := val.Int32OK() -// // do something with i32... // -// Native Go Types +// var raw bson.Raw = ... // bytes from somewhere +// err := raw.Validate() +// if err != nil { return err } +// val := raw.Lookup("foo") +// i32, ok := val.Int32OK() +// // do something with i32... +// +// # Native Go Types // // The D and M types defined in this package can be used to build representations of BSON using native Go types. D is a // slice and M is a map. For more information about the use cases for these types, see the documentation on the type @@ -32,63 +33,64 @@ // Note that a D should not be constructed with duplicate key names, as that can cause undefined server behavior. // // Example: -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // // When decoding BSON to a D or M, the following type mappings apply when unmarshalling: // -// 1. BSON int32 unmarshals to an int32. -// 2. BSON int64 unmarshals to an int64. -// 3. BSON double unmarshals to a float64. -// 4. BSON string unmarshals to a string. -// 5. BSON boolean unmarshals to a bool. -// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). -// 7. BSON array unmarshals to a bson.A. -// 8. BSON ObjectId unmarshals to a primitive.ObjectID. -// 9. BSON datetime unmarshals to a primitive.DateTime. -// 10. BSON binary unmarshals to a primitive.Binary. -// 11. BSON regular expression unmarshals to a primitive.Regex. -// 12. BSON JavaScript unmarshals to a primitive.JavaScript. -// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. -// 14. BSON timestamp unmarshals to an primitive.Timestamp. -// 15. 
BSON 128-bit decimal unmarshals to an primitive.Decimal128. -// 16. BSON min key unmarshals to an primitive.MinKey. -// 17. BSON max key unmarshals to an primitive.MaxKey. -// 18. BSON undefined unmarshals to a primitive.Undefined. -// 19. BSON null unmarshals to nil. -// 20. BSON DBPointer unmarshals to a primitive.DBPointer. -// 21. BSON symbol unmarshals to a primitive.Symbol. +// 1. BSON int32 unmarshals to an int32. +// 2. BSON int64 unmarshals to an int64. +// 3. BSON double unmarshals to a float64. +// 4. BSON string unmarshals to a string. +// 5. BSON boolean unmarshals to a bool. +// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). +// 7. BSON array unmarshals to a bson.A. +// 8. BSON ObjectId unmarshals to a primitive.ObjectID. +// 9. BSON datetime unmarshals to a primitive.DateTime. +// 10. BSON binary unmarshals to a primitive.Binary. +// 11. BSON regular expression unmarshals to a primitive.Regex. +// 12. BSON JavaScript unmarshals to a primitive.JavaScript. +// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. +// 14. BSON timestamp unmarshals to an primitive.Timestamp. +// 15. BSON 128-bit decimal unmarshals to an primitive.Decimal128. +// 16. BSON min key unmarshals to an primitive.MinKey. +// 17. BSON max key unmarshals to an primitive.MaxKey. +// 18. BSON undefined unmarshals to a primitive.Undefined. +// 19. BSON null unmarshals to nil. +// 20. BSON DBPointer unmarshals to a primitive.DBPointer. +// 21. BSON symbol unmarshals to a primitive.Symbol. // // The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: // -// 1. time.Time marshals to a BSON datetime. -// 2. int8, int16, and int32 marshal to a BSON int32. -// 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 -// otherwise. -// 4. int64 marshals to BSON int64. -// 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or -// undefined value into a string will yield the empty string.). +// 1. time.Time marshals to a BSON datetime. +// 2. int8, int16, and int32 marshal to a BSON int32. +// 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 +// otherwise. +// 4. int64 marshals to BSON int64. +// 5. uint8 and uint16 marshal to a BSON int32. +// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, +// inclusive, and BSON int64 otherwise. +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// undefined value into a string will yield the empty string.). // -// Structs +// # Structs // // Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshalled or unmarshalled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. 
When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is +// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is // marshalled as a BSON null value. // -// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents // unmarshalled into an interface{} field will be unmarshalled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. @@ -98,13 +100,14 @@ // are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: -// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) +// +// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. // The name may be empty in order to specify options without overriding the default field name. The following options can be used // to configure behavior: // -// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to // the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if // their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). // Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered @@ -113,16 +116,16 @@ // never considered empty and will be marshalled as embedded documents. // NOTE: It is recommended that this tag be used for all slice and map fields. // -// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of +// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of // the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other // types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled -// into that field will be trucated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled +// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, // it will be unmarshalled as 3. 
If this tag is not specified, the decoder will throw an error if the value cannot be // decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // -// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when +// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when // marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be // pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a // map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be @@ -132,7 +135,7 @@ // This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be // marshalled. For fields that are not maps or structs, this tag is ignored. // -// Marshalling and Unmarshalling +// # Marshalling and Unmarshalling // // Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index 79f0385..db8d8ee 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -225,10 +225,13 @@ func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val return *sw, nil } +// IndentExtJSON will prefix and indent the provided extended JSON src and append it to dst. func IndentExtJSON(dst *bytes.Buffer, src []byte, prefix, indent string) error { return json.Indent(dst, src, prefix, indent) } +// MarshalExtJSONIndent returns the extended JSON encoding of val with each line with prefixed +// and indented. func MarshalExtJSONIndent(val interface{}, canonical, escapeHTML bool, prefix, indent string) ([]byte, error) { marshaled, err := MarshalExtJSON(val, canonical, escapeHTML) if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index ffe4eed..ba7c911 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -191,10 +191,9 @@ func (d Decimal128) IsNaN() bool { // IsInf returns: // -// +1 d == Infinity -// 0 other case -// -1 d == -Infinity -// +// +1 d == Infinity +// 0 other case +// -1 d == -Infinity func (d Decimal128) IsInf() int { if d.h>>58&(1<<5-1) != 0x1E { return 0 diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index 652898f..ded3673 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -61,7 +61,9 @@ func (id ObjectID) Timestamp() time.Time { // Hex returns the hex encoding of the ObjectID as a string. 
func (id ObjectID) Hex() string { - return hex.EncodeToString(id[:]) + var buf [24]byte + hex.Encode(buf[:], id[:]) + return string(buf[:]) } func (id ObjectID) String() string { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index b3cba1b..c72ccc1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -182,7 +182,7 @@ type MaxKey struct{} // // Example usage: // -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} type D []E // Map creates a map from the elements of the D. @@ -206,12 +206,12 @@ type E struct { // // Example usage: // -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} type M map[string]interface{} // An A is an ordered representation of a BSON array. // // Example usage: // -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} type A []interface{} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index 09062d2..16d7573 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -13,7 +13,7 @@ import "go.mongodb.org/mongo-driver/bson/bsoncodec" var DefaultRegistry = NewRegistryBuilder().Build() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and -// deocders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the +// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go index 6f9ca04..f936ba1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go @@ -23,7 +23,7 @@ type Unmarshaler interface { } // ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representaiton of themselves. The BSON bytes and type can be +// BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. type ValueUnmarshaler interface { diff --git a/vendor/go.mongodb.org/mongo-driver/event/doc.go b/vendor/go.mongodb.org/mongo-driver/event/doc.go index 93b5ede..da1da4d 100644 --- a/vendor/go.mongodb.org/mongo-driver/event/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/event/doc.go @@ -14,43 +14,43 @@ // CommandSucceededEvent or CommandFailedEvent through the RequestID field. 
For // example, the following code collects the names of started events: // -// var commandStarted []string -// cmdMonitor := &event.CommandMonitor{ -// Started: func(_ context.Context, evt *event.CommandStartedEvent) { -// commandStarted = append(commandStarted, evt.CommandName) -// }, -// } -// clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetMonitor(cmdMonitor) -// client, err := mongo.Connect(context.Background(), clientOpts) +// var commandStarted []string +// cmdMonitor := &event.CommandMonitor{ +// Started: func(_ context.Context, evt *event.CommandStartedEvent) { +// commandStarted = append(commandStarted, evt.CommandName) +// }, +// } +// clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetMonitor(cmdMonitor) +// client, err := mongo.Connect(context.Background(), clientOpts) // // Monitoring the connection pool requires specifying a PoolMonitor when constructing // a mongo.Client. The following code tracks the number of checked out connections: // -// var int connsCheckedOut -// poolMonitor := &event.PoolMonitor{ -// Event: func(evt *event.PoolEvent) { -// switch evt.Type { -// case event.GetSucceeded: -// connsCheckedOut++ -// case event.ConnectionReturned: -// connsCheckedOut-- -// } -// }, -// } -// clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetPoolMonitor(poolMonitor) -// client, err := mongo.Connect(context.Background(), clientOpts) +// var int connsCheckedOut +// poolMonitor := &event.PoolMonitor{ +// Event: func(evt *event.PoolEvent) { +// switch evt.Type { +// case event.GetSucceeded: +// connsCheckedOut++ +// case event.ConnectionReturned: +// connsCheckedOut-- +// } +// }, +// } +// clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetPoolMonitor(poolMonitor) +// client, err := mongo.Connect(context.Background(), clientOpts) // // Monitoring server changes specifying a ServerMonitor object when constructing // a mongo.Client. Different functions can be set on the ServerMonitor to // monitor different kinds of events. See ServerMonitor for more details. 
// The following code appends ServerHeartbeatStartedEvents to a slice: // -// var heartbeatStarted []*event.ServerHeartbeatStartedEvent -// svrMonitor := &event.ServerMonitor{ -// ServerHeartbeatStarted: func(e *event.ServerHeartbeatStartedEvent) { -// heartbeatStarted = append(heartbeatStarted, e) -// } -// } -// clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetServerMonitor(svrMonitor) -// client, err := mongo.Connect(context.Background(), clientOpts) +// var heartbeatStarted []*event.ServerHeartbeatStartedEvent +// svrMonitor := &event.ServerMonitor{ +// ServerHeartbeatStarted: func(e *event.ServerHeartbeatStartedEvent) { +// heartbeatStarted = append(heartbeatStarted, e) +// } +// } +// clientOpts := options.Client().ApplyURI("mongodb://localhost:27017").SetServerMonitor(svrMonitor) +// client, err := mongo.Connect(context.Background(), clientOpts) package event diff --git a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go index d95deef..ac05e40 100644 --- a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go +++ b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go @@ -70,19 +70,22 @@ const ( ReasonStale = "stale" ReasonConnectionErrored = "connectionError" ReasonTimedOut = "timeout" + ReasonError = "error" ) // strings for pool command monitoring types const ( - ConnectionClosed = "ConnectionClosed" PoolCreated = "ConnectionPoolCreated" + PoolReady = "ConnectionPoolReady" + PoolCleared = "ConnectionPoolCleared" + PoolClosedEvent = "ConnectionPoolClosed" ConnectionCreated = "ConnectionCreated" ConnectionReady = "ConnectionReady" + ConnectionClosed = "ConnectionClosed" + GetStarted = "ConnectionCheckOutStarted" GetFailed = "ConnectionCheckOutFailed" GetSucceeded = "ConnectionCheckedOut" ConnectionReturned = "ConnectionCheckedIn" - PoolCleared = "ConnectionPoolCleared" - PoolClosedEvent = "ConnectionPoolClosed" ) // MonitorPoolOptions contains pool options as formatted in pool events diff --git a/vendor/go.mongodb.org/mongo-driver/internal/const.go b/vendor/go.mongodb.org/mongo-driver/internal/const.go index f2d2d88..a7ef69d 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/const.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/const.go @@ -9,11 +9,6 @@ package internal // import "go.mongodb.org/mongo-driver/internal" // Version is the current version of the driver. var Version = "local build" -// SetMockServiceID enables a mode in which the driver mocks server support for returning a "serviceId" field in "hello" -// command responses by using the value of "topologyVersion.processId". This is used for testing load balancer support -// until an upstream service can support running behind a load balancer. -var SetMockServiceID = false - // LegacyHello is the legacy version of the hello command. var LegacyHello = "isMaster" diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go new file mode 100644 index 0000000..635d8e3 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go @@ -0,0 +1,39 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal + +import ( + "fmt" + + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +const ( + EncryptedCacheCollection = "ecc" + EncryptedStateCollection = "esc" + EncryptedCompactionCollection = "ecoc" +) + +// GetEncryptedStateCollectionName returns the encrypted state collection name associated with dataCollectionName. +func GetEncryptedStateCollectionName(efBSON bsoncore.Document, dataCollectionName string, stateCollection string) (string, error) { + fieldName := stateCollection + "Collection" + val, err := efBSON.LookupErr(fieldName) + if err != nil { + if err != bsoncore.ErrElementNotFound { + return "", err + } + // Return default name. + defaultName := "enxcol_." + dataCollectionName + "." + stateCollection + return defaultName, nil + } + + stateCollectionName, ok := val.StringValueOK() + if !ok { + return "", fmt.Errorf("expected string for '%v', got: %v", fieldName, val.Type) + } + return stateCollectionName, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go new file mode 100644 index 0000000..1e63257 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go @@ -0,0 +1,58 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal + +import ( + "context" + "time" +) + +type timeoutKey struct{} + +// MakeTimeoutContext returns a new context with Client-Side Operation Timeout (CSOT) feature-gated behavior +// and a Timeout set to the passed in Duration. Setting a Timeout on a single operation is not supported in +// public API. +// +// TODO(GODRIVER-2348) We may be able to remove this function once CSOT feature-gated behavior becomes the +// TODO default behavior. +func MakeTimeoutContext(ctx context.Context, to time.Duration) (context.Context, context.CancelFunc) { + // Only use the passed in Duration as a timeout on the Context if it + // is non-zero. + cancelFunc := func() {} + if to != 0 { + ctx, cancelFunc = context.WithTimeout(ctx, to) + } + return context.WithValue(ctx, timeoutKey{}, true), cancelFunc +} + +func IsTimeoutContext(ctx context.Context) bool { + return ctx.Value(timeoutKey{}) != nil +} + +// ZeroRTTMonitor implements the RTTMonitor interface and is used internally for testing. It returns 0 for all +// RTT calculations and an empty string for RTT statistics. +type ZeroRTTMonitor struct{} + +// EWMA implements the RTT monitor interface. +func (zrm *ZeroRTTMonitor) EWMA() time.Duration { + return 0 +} + +// Min implements the RTT monitor interface. +func (zrm *ZeroRTTMonitor) Min() time.Duration { + return 0 +} + +// P90 implements the RTT monitor interface. +func (zrm *ZeroRTTMonitor) P90() time.Duration { + return 0 +} + +// Stats implements the RTT monitor interface. 
+func (zrm *ZeroRTTMonitor) Stats() string { + return "" +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/error.go b/vendor/go.mongodb.org/mongo-driver/internal/error.go index 6a105af..348bcdf 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/error.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/error.go @@ -32,7 +32,7 @@ func RolledUpErrorMessage(err error) string { return err.Error() } -//UnwrapError attempts to unwrap the error down to its root cause. +// UnwrapError attempts to unwrap the error down to its root cause. func UnwrapError(err error) error { switch tErr := err.(type) { @@ -117,3 +117,7 @@ func (e *wrappedError) Error() string { func (e *wrappedError) Inner() error { return e.inner } + +func (e *wrappedError) Unwrap() error { + return e.inner +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/http.go b/vendor/go.mongodb.org/mongo-driver/internal/http.go new file mode 100644 index 0000000..13c5fbe --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/http.go @@ -0,0 +1,38 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal // import "go.mongodb.org/mongo-driver/internal" + +import ( + "net/http" + "time" +) + +// DefaultHTTPClient is the default HTTP client used across the driver. +var DefaultHTTPClient = &http.Client{ + // TODO(GODRIVER-2623): Use "http.DefaultTransport.Clone" once we change the minimum supported Go version to 1.13. + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, +} + +// CloseIdleHTTPConnections closes any connections which were previously +// connected from previous requests but are now sitting idle in +// a "keep-alive" state. It does not interrupt any connections currently +// in use. +// Borrowed from go standard library. +func CloseIdleHTTPConnections(client *http.Client) { + type closeIdler interface { + CloseIdleConnections() + } + if tr, ok := client.Transport.(closeIdler); ok { + tr.CloseIdleConnections() + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go new file mode 100644 index 0000000..4479009 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go @@ -0,0 +1,38 @@ +// Copied from https://cs.opensource.google/go/go/+/946b4baaf6521d521928500b2b57429c149854e7:src/math/bits.go + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +func Add64(x, y, carry uint64) (sum, carryOut uint64) { + yc := y + carry + sum = x + yc + if sum < x || yc < y { + carryOut = 1 + } + return +} + +// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. 
+func Mul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go new file mode 100644 index 0000000..859e4e0 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go @@ -0,0 +1,223 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/exp.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Exponential distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + re = 7.69711747013104972 +) + +// ExpFloat64 returns an exponentially distributed float64 in the range +// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter +// (lambda) is 1 and whose mean is 1/lambda (1). +// To produce a distribution with a different rate parameter, +// callers can adjust the output using: +// +// sample = ExpFloat64() / desiredRateParameter +func (r *Rand) ExpFloat64() float64 { + for { + j := r.Uint32() + i := j & 0xFF + x := float64(j) * float64(we[i]) + if j < ke[i] { + return x + } + if i == 0 { + return re - math.Log(r.Float64()) + } + if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) { + return x + } + } +} + +var ke = [256]uint32{ + 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990, + 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8, + 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78, + 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651, + 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca, + 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8, + 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea, + 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba, + 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed, + 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662, + 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3, + 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace, + 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6, + 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7, + 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415, + 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4, + 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36, + 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46, + 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac, + 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245, + 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52, + 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06, + 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0, + 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9, + 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76, + 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516, + 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289, + 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed, + 0xfe212bc3, 0xfe21c745, 
0xfe225678, 0xfe22d95f, 0xfe234ffb, + 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e, + 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a, + 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1, + 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b, + 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621, + 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d, + 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3, + 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73, + 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88, + 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a, + 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb, + 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176, + 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be, + 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192, + 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed, + 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936, + 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b, + 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4, + 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1, + 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482, + 0xfa839276, 0xfa263b32, 0xf9b72d1c, 0xf930a1a2, 0xf889f023, + 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d, + 0xe6da6ecf, +} +var we = [256]float32{ + 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11, + 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11, + 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11, + 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11, + 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10, + 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10, + 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10, + 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10, + 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10, + 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10, + 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10, + 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10, + 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10, + 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10, + 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10, + 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10, + 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10, + 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10, + 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10, + 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10, + 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10, + 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10, + 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10, + 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10, + 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10, + 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10, + 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10, + 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10, + 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10, + 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10, + 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10, + 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10, + 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10, + 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10, + 4.1272286e-10, 4.1577405e-10, 
4.1884296e-10, 4.2192994e-10, + 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10, + 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10, + 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10, + 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10, + 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10, + 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10, + 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10, + 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10, + 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10, + 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10, + 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10, + 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10, + 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10, + 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10, + 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10, + 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10, + 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10, + 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10, + 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10, + 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10, + 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10, + 8.401528e-10, 8.496445e-10, 8.594247e-10, 8.6951274e-10, + 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10, + 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10, + 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09, + 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09, + 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09, + 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09, + 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09, +} +var fe = [256]float32{ + 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933, + 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686, + 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665, + 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967, + 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896, + 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092, + 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386, + 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495, + 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752, + 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325, + 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955, + 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694, + 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218, + 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763, + 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044, + 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796, + 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408, + 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928, + 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393, + 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625, + 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107, + 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878, + 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438, + 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682, + 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852, + 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479, + 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354, + 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494, + 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119, + 0.14848037, 
0.14642459, 0.14438373, 0.14235765, 0.14034624, + 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574, + 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672, + 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763, + 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816, + 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919, + 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274, + 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195, + 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106, + 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434, + 0.062193416, 0.060783047, 0.059384305, 0.057997175, + 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236, + 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623, + 0.043502413, 0.042254124, 0.041017443, 0.039792392, + 0.038578995, 0.037377283, 0.036187284, 0.035009038, + 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566, + 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421, + 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867, + 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392, + 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414, + 0.008780315, 0.007963077, 0.0071633533, 0.006381906, + 0.0056196423, 0.0048776558, 0.004157295, 0.0034602648, + 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693, + 0.00045413437, +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go new file mode 100644 index 0000000..8c74a35 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go @@ -0,0 +1,158 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/normal.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Normal distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + rn = 3.442619855899 +) + +func absInt32(i int32) uint32 { + if i < 0 { + return uint32(-i) + } + return uint32(i) +} + +// NormFloat64 returns a normally distributed float64 in the range +// [-math.MaxFloat64, +math.MaxFloat64] with +// standard normal distribution (mean = 0, stddev = 1). +// To produce a different normal distribution, callers can +// adjust the output using: +// +// sample = NormFloat64() * desiredStdDev + desiredMean +func (r *Rand) NormFloat64() float64 { + for { + j := int32(r.Uint32()) // Possibly negative + i := j & 0x7F + x := float64(j) * float64(wn[i]) + if absInt32(j) < kn[i] { + // This case should be hit better than 99% of the time. + return x + } + + if i == 0 { + // This extra work is only required for the base strip. 
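+			// The loop below implements Marsaglia's tail method: draw
+			// x = Exp(1)/rn and y = Exp(1) until 2y >= x*x; the accepted x,
+			// attached at +/-rn, follows the standard normal tail beyond rn.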
+ for { + x = -math.Log(r.Float64()) * (1.0 / rn) + y := -math.Log(r.Float64()) + if y+y >= x*x { + break + } + } + if j > 0 { + return rn + x + } + return -rn - x + } + if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) { + return x + } + } +} + +var kn = [128]uint32{ + 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2, + 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d, + 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7, + 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883, + 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30, + 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa, + 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d, + 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18, + 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924, + 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a, + 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4, + 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62, + 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e, + 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473, + 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd, + 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568, + 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08, + 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc, + 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94, + 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb, + 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075, + 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba, + 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded, + 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72, + 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a, + 0x7ba90bdc, 0x7a722176, 0x77d664e5, +} +var wn = [128]float32{ + 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10, + 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10, + 2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10, + 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10, + 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10, + 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10, + 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10, + 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10, + 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10, + 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10, + 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10, + 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10, + 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10, + 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10, + 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10, + 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10, + 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10, + 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10, + 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10, + 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10, + 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10, + 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10, + 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10, + 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10, + 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10, + 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09, + 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09, + 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 
1.1016947e-09, + 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09, + 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09, + 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09, + 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09, +} +var fn = [128]float32{ + 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303, + 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177, + 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569, + 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277, + 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434, + 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903, + 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055, + 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766, + 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872, + 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642, + 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446, + 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685, + 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484, + 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807, + 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239, + 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877, + 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499, + 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037, + 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265, + 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602, + 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467, + 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058, + 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863, + 0.040742867, 0.03688439, 0.033087887, 0.029356318, + 0.025693292, 0.022103304, 0.018592102, 0.015167298, + 0.011839478, 0.008624485, 0.005548995, 0.0026696292, +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go new file mode 100644 index 0000000..ffd0509 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go @@ -0,0 +1,374 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rand.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rand implements pseudo-random number generators. +// +// Random numbers are generated by a Source. Top-level functions, such as +// Float64 and Int, use a default shared Source that produces a deterministic +// sequence of values each time a program is run. Use the Seed function to +// initialize the default Source if different behavior is required for each run. +// The default Source, a LockedSource, is safe for concurrent use by multiple +// goroutines, but Sources created by NewSource are not. However, Sources are small +// and it is reasonable to have a separate Source for each goroutine, seeded +// differently, to avoid locking. +// +// For random numbers suitable for security-sensitive work, see the crypto/rand +// package. +package rand + +import "sync" + +// A Source represents a source of uniformly-distributed +// pseudo-random int64 values in the range [0, 1<<64). +type Source interface { + Uint64() uint64 + Seed(seed uint64) +} + +// NewSource returns a new pseudo-random Source seeded with the given value. 
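Any type with these two methods can act as a Source. As an illustration (hypothetical, not part of the vendored code), a SplitMix64 generator satisfies the interface:

```go
package main

import "fmt"

// splitMix64 is an example Source: any type providing Uint64() and
// Seed(uint64) can back a Rand.
type splitMix64 struct{ state uint64 }

func (s *splitMix64) Seed(seed uint64) { s.state = seed }

func (s *splitMix64) Uint64() uint64 {
	// SplitMix64 step: an additive state advance followed by a mixer.
	s.state += 0x9e3779b97f4a7c15
	z := s.state
	z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9
	z = (z ^ (z >> 27)) * 0x94d049bb133111eb
	return z ^ (z >> 31)
}

func main() {
	var src splitMix64
	src.Seed(42)
	fmt.Println(src.Uint64(), src.Uint64()) // deterministic for a fixed seed
}
```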
+func NewSource(seed uint64) Source { + var rng PCGSource + rng.Seed(seed) + return &rng +} + +// A Rand is a source of random numbers. +type Rand struct { + src Source + + // readVal contains remainder of 64-bit integer used for bytes + // generation during most recent Read call. + // It is saved so next Read call can start where the previous + // one finished. + readVal uint64 + // readPos indicates the number of low-order bytes of readVal + // that are still valid. + readPos int8 +} + +// New returns a new Rand that uses random values from src +// to generate other random values. +func New(src Source) *Rand { + return &Rand{src: src} +} + +// Seed uses the provided seed value to initialize the generator to a deterministic state. +// Seed should not be called concurrently with any other Rand method. +func (r *Rand) Seed(seed uint64) { + if lk, ok := r.src.(*LockedSource); ok { + lk.seedPos(seed, &r.readPos) + return + } + + r.src.Seed(seed) + r.readPos = 0 +} + +// Uint64 returns a pseudo-random 64-bit integer as a uint64. +func (r *Rand) Uint64() uint64 { return r.src.Uint64() } + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. +func (r *Rand) Int63() int64 { return int64(r.src.Uint64() &^ (1 << 63)) } + +// Uint32 returns a pseudo-random 32-bit value as a uint32. +func (r *Rand) Uint32() uint32 { return uint32(r.Uint64() >> 32) } + +// Int31 returns a non-negative pseudo-random 31-bit integer as an int32. +func (r *Rand) Int31() int32 { return int32(r.Uint64() >> 33) } + +// Int returns a non-negative pseudo-random int. +func (r *Rand) Int() int { + u := uint(r.Uint64()) + return int(u << 1 >> 1) // clear sign bit. +} + +const maxUint64 = (1 << 64) - 1 + +// Uint64n returns, as a uint64, a pseudo-random number in [0,n). +// It is guaranteed more uniform than taking a Source value mod n +// for any n that is not a power of 2. +func (r *Rand) Uint64n(n uint64) uint64 { + if n&(n-1) == 0 { // n is power of two, can mask + if n == 0 { + panic("invalid argument to Uint64n") + } + return r.Uint64() & (n - 1) + } + // If n does not divide v, to avoid bias we must not use + // a v that is within maxUint64%n of the top of the range. + v := r.Uint64() + if v > maxUint64-n { // Fast check. + ceiling := maxUint64 - maxUint64%n + for v >= ceiling { + v = r.Uint64() + } + } + + return v % n +} + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Int63n(n int64) int64 { + if n <= 0 { + panic("invalid argument to Int63n") + } + return int64(r.Uint64n(uint64(n))) +} + +// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Int31n(n int32) int32 { + if n <= 0 { + panic("invalid argument to Int31n") + } + // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines. + return int32(r.Uint64n(uint64(n))) +} + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Intn(n int) int { + if n <= 0 { + panic("invalid argument to Intn") + } + // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines. + return int(r.Uint64n(uint64(n))) +} + +// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0). +func (r *Rand) Float64() float64 { + // There is one bug in the value stream: r.Int63() may be so close + // to 1<<63 that the division rounds up to 1.0, and we've guaranteed + // that the result is always less than 1.0. 
+	//
+	// We tried to fix this by mapping 1.0 back to 0.0, but since float64
+	// values near 0 are much denser than near 1, mapping 1 to 0 caused
+	// a theoretically significant overshoot in the probability of returning 0.
+	// Instead of that, if we round up to 1, just try again.
+	// Getting 1 only happens 1/2⁵³ of the time, so most clients
+	// will not observe it anyway.
+again:
+	f := float64(r.Uint64n(1<<53)) / (1 << 53)
+	if f == 1.0 {
+		goto again // resample; this branch is taken O(never)
+	}
+	return f
+}
+
+// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0).
+func (r *Rand) Float32() float32 {
+	// We do not want to return 1.0.
+	// This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64).
+again:
+	f := float32(r.Float64())
+	if f == 1 {
+		goto again // resample; this branch is taken O(very rarely)
+	}
+	return f
+}
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func (r *Rand) Perm(n int) []int {
+	m := make([]int, n)
+	// In the following loop, the iteration when i=0 always swaps m[0] with m[0].
+	// A change to remove this useless iteration is to assign 1 to i in the init
+	// statement. But Perm also affects r. Making this change will affect
+	// the final state of r. So this change can't be made for compatibility
+	// reasons for Go 1.
+	for i := 0; i < n; i++ {
+		j := r.Intn(i + 1)
+		m[i] = m[j]
+		m[j] = i
+	}
+	return m
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func (r *Rand) Shuffle(n int, swap func(i, j int)) {
+	if n < 0 {
+		panic("invalid argument to Shuffle")
+	}
+
+	// Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
+	// Shuffle really ought not be called with n that doesn't fit in 32 bits.
+	// Not only will it take a very long time, but with 2³¹! possible permutations,
+	// there's no way that any PRNG can have a big enough internal state to
+	// generate even a minuscule percentage of the possible permutations.
+	// Nevertheless, the right API signature accepts an int n, so handle it as best we can.
+	i := n - 1
+	for ; i > 1<<31-1-1; i-- {
+		j := int(r.Int63n(int64(i + 1)))
+		swap(i, j)
+	}
+	for ; i > 0; i-- {
+		j := int(r.Int31n(int32(i + 1)))
+		swap(i, j)
+	}
+}
+
+// Read generates len(p) random bytes and writes them into p. It
+// always returns len(p) and a nil error.
+// Read should not be called concurrently with any other Rand method unless
+// the underlying source is a LockedSource.
+func (r *Rand) Read(p []byte) (n int, err error) {
+	if lk, ok := r.src.(*LockedSource); ok {
+		return lk.Read(p, &r.readVal, &r.readPos)
+	}
+	return read(p, r.src, &r.readVal, &r.readPos)
+}
+
+func read(p []byte, src Source, readVal *uint64, readPos *int8) (n int, err error) {
+	pos := *readPos
+	val := *readVal
+	rng, _ := src.(*PCGSource)
+	for n = 0; n < len(p); n++ {
+		if pos == 0 {
+			if rng != nil {
+				val = rng.Uint64()
+			} else {
+				val = src.Uint64()
+			}
+			pos = 8
+		}
+		p[n] = byte(val)
+		val >>= 8
+		pos--
+	}
+	*readPos = pos
+	*readVal = val
+	return
+}
+
+/*
+ * Top-level convenience functions
+ */
+
+var globalRand = New(&LockedSource{src: *NewSource(1).(*PCGSource)})
+
+// Type assert that globalRand's source is a LockedSource whose src is a PCGSource.
+var _ PCGSource = globalRand.src.(*LockedSource).src
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state.
If Seed is not called, the generator behaves as +// if seeded by Seed(1). +// Seed, unlike the Rand.Seed method, is safe for concurrent use. +func Seed(seed uint64) { globalRand.Seed(seed) } + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64 +// from the default Source. +func Int63() int64 { return globalRand.Int63() } + +// Uint32 returns a pseudo-random 32-bit value as a uint32 +// from the default Source. +func Uint32() uint32 { return globalRand.Uint32() } + +// Uint64 returns a pseudo-random 64-bit value as a uint64 +// from the default Source. +func Uint64() uint64 { return globalRand.Uint64() } + +// Int31 returns a non-negative pseudo-random 31-bit integer as an int32 +// from the default Source. +func Int31() int32 { return globalRand.Int31() } + +// Int returns a non-negative pseudo-random int from the default Source. +func Int() int { return globalRand.Int() } + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int63n(n int64) int64 { return globalRand.Int63n(n) } + +// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int31n(n int32) int32 { return globalRand.Int31n(n) } + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Intn(n int) int { return globalRand.Intn(n) } + +// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0) +// from the default Source. +func Float64() float64 { return globalRand.Float64() } + +// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0) +// from the default Source. +func Float32() float32 { return globalRand.Float32() } + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) +// from the default Source. +func Perm(n int) []int { return globalRand.Perm(n) } + +// Shuffle pseudo-randomizes the order of elements using the default Source. +// n is the number of elements. Shuffle panics if n < 0. +// swap swaps the elements with indexes i and j. +func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) } + +// Read generates len(p) random bytes from the default Source and +// writes them into p. It always returns len(p) and a nil error. +// Read, unlike the Rand.Read method, is safe for concurrent use. +func Read(p []byte) (n int, err error) { return globalRand.Read(p) } + +// NormFloat64 returns a normally distributed float64 in the range +// [-math.MaxFloat64, +math.MaxFloat64] with +// standard normal distribution (mean = 0, stddev = 1) +// from the default Source. +// To produce a different normal distribution, callers can +// adjust the output using: +// +// sample = NormFloat64() * desiredStdDev + desiredMean +func NormFloat64() float64 { return globalRand.NormFloat64() } + +// ExpFloat64 returns an exponentially distributed float64 in the range +// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter +// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source. +// To produce a distribution with a different rate parameter, +// callers can adjust the output using: +// +// sample = ExpFloat64() / desiredRateParameter +func ExpFloat64() float64 { return globalRand.ExpFloat64() } + +// LockedSource is an implementation of Source that is concurrency-safe. +// A Rand using a LockedSource is safe for concurrent use. 
+//
+// The zero value of LockedSource is valid, but should be seeded before use.
+type LockedSource struct {
+	lk  sync.Mutex
+	src PCGSource
+}
+
+func (s *LockedSource) Uint64() (n uint64) {
+	s.lk.Lock()
+	n = s.src.Uint64()
+	s.lk.Unlock()
+	return
+}
+
+func (s *LockedSource) Seed(seed uint64) {
+	s.lk.Lock()
+	s.src.Seed(seed)
+	s.lk.Unlock()
+}
+
+// seedPos implements Seed for a LockedSource without a race condition.
+func (s *LockedSource) seedPos(seed uint64, readPos *int8) {
+	s.lk.Lock()
+	s.src.Seed(seed)
+	*readPos = 0
+	s.lk.Unlock()
+}
+
+// Read implements Read for a LockedSource.
+func (s *LockedSource) Read(p []byte, readVal *uint64, readPos *int8) (n int, err error) {
+	s.lk.Lock()
+	n, err = read(p, &s.src, readVal, readPos)
+	s.lk.Unlock()
+	return
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go
new file mode 100644
index 0000000..f04f987
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go
@@ -0,0 +1,93 @@
+// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rng.go
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rand
+
+import (
+	"encoding/binary"
+	"io"
+	"math/bits"
+)
+
+// PCGSource is an implementation of a 64-bit permuted congruential
+// generator as defined in
+//
+//	PCG: A Family of Simple Fast Space-Efficient Statistically Good
+//	Algorithms for Random Number Generation
+//	Melissa E. O’Neill, Harvey Mudd College
+//	http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf
+//
+// The generator here is the congruential generator PCG XSL RR 128/64 (LCG)
+// as found in the software available at http://www.pcg-random.org/.
+// It has period 2^128 with 128 bits of state, producing 64-bit values.
+// Its state is represented by two uint64 words.
+type PCGSource struct {
+	low  uint64
+	high uint64
+}
+
+const (
+	maxUint32 = (1 << 32) - 1
+
+	multiplier = 47026247687942121848144207491837523525
+	mulHigh    = multiplier >> 64
+	mulLow     = multiplier & maxUint64
+
+	increment = 117397592171526113268558934119004209487
+	incHigh   = increment >> 64
+	incLow    = increment & maxUint64
+
+	// TODO: Use these?
+	initializer = 245720598905631564143578724636268694099
+	initHigh    = initializer >> 64
+	initLow     = initializer & maxUint64
+)
+
+// Seed uses the provided seed value to initialize the generator to a deterministic state.
+func (pcg *PCGSource) Seed(seed uint64) {
+	pcg.low = seed
+	pcg.high = seed // TODO: What is right?
+}
+
+// Uint64 returns a pseudo-random 64-bit unsigned integer as a uint64.
+func (pcg *PCGSource) Uint64() uint64 {
+	pcg.multiply()
+	pcg.add()
+	// XOR high and low 64 bits together and rotate right by high 6 bits of state.
+	return bits.RotateLeft64(pcg.high^pcg.low, -int(pcg.high>>58))
+}
+
+func (pcg *PCGSource) add() {
+	var carry uint64
+	pcg.low, carry = Add64(pcg.low, incLow, 0)
+	pcg.high, _ = Add64(pcg.high, incHigh, carry)
+}
+
+func (pcg *PCGSource) multiply() {
+	hi, lo := Mul64(pcg.low, mulLow)
+	hi += pcg.high * mulLow
+	hi += pcg.low * mulHigh
+	pcg.low = lo
+	pcg.high = hi
+}
+
+// MarshalBinary returns the binary representation of the current state of the generator.
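The multiply/add/rotate sequence above is the whole generator. As a compact restatement, here is a sketch using `math/bits` directly rather than the local Add64/Mul64 helpers; the hi/lo constants below are the two halves of the same 128-bit multiplier and increment:

```go
package main

import (
	"fmt"
	"math/bits"
)

const (
	mulHi uint64 = 2549297995355413924 // multiplier >> 64
	mulLo uint64 = 4865540595714422341 // multiplier & (1<<64 - 1)
	incHi uint64 = 6364136223846793005 // increment >> 64
	incLo uint64 = 1442695040888963407 // increment & (1<<64 - 1)
)

type pcg struct{ high, low uint64 }

func (p *pcg) next() uint64 {
	// state = state*multiplier + increment, in 128-bit arithmetic.
	hi, lo := bits.Mul64(p.low, mulLo)
	hi += p.high*mulLo + p.low*mulHi
	var carry uint64
	lo, carry = bits.Add64(lo, incLo, 0)
	hi, _ = bits.Add64(hi, incHi, carry)
	p.high, p.low = hi, lo
	// XSL RR output: XOR the halves, rotate by the top 6 bits of state.
	return bits.RotateLeft64(p.high^p.low, -int(p.high>>58))
}

func main() {
	g := pcg{high: 9, low: 42}
	fmt.Println(g.next(), g.next())
}
```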
+func (pcg *PCGSource) MarshalBinary() ([]byte, error) { + var buf [16]byte + binary.BigEndian.PutUint64(buf[:8], pcg.high) + binary.BigEndian.PutUint64(buf[8:], pcg.low) + return buf[:], nil +} + +// UnmarshalBinary sets the state of the generator to the state represented in data. +func (pcg *PCGSource) UnmarshalBinary(data []byte) error { + if len(data) < 16 { + return io.ErrUnexpectedEOF + } + pcg.low = binary.BigEndian.Uint64(data[8:]) + pcg.high = binary.BigEndian.Uint64(data[:8]) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go index 631f953..9616074 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go @@ -1,54 +1,39 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + // Package randutil provides common random number utilities. package randutil import ( - "math/rand" - "sync" + crand "crypto/rand" + "fmt" + "io" + + xrand "go.mongodb.org/mongo-driver/internal/randutil/rand" ) -// A LockedRand wraps a "math/rand".Rand and is safe to use from multiple goroutines. -type LockedRand struct { - mu sync.Mutex - r *rand.Rand +// NewLockedRand returns a new "x/exp/rand" pseudo-random number generator seeded with a +// cryptographically-secure random number. +// It is safe to use from multiple goroutines. +func NewLockedRand() *xrand.Rand { + var randSrc = new(xrand.LockedSource) + randSrc.Seed(cryptoSeed()) + return xrand.New(randSrc) } -// NewLockedRand returns a new LockedRand that uses random values from src to generate other random -// values. It is safe to use from multiple goroutines. -func NewLockedRand(src rand.Source) *LockedRand { - return &LockedRand{ - // Ignore gosec warning "Use of weak random number generator (math/rand instead of - // crypto/rand)". We intentionally use a pseudo-random number generator. - /* #nosec G404 */ - r: rand.New(src), +// cryptoSeed returns a random uint64 read from the "crypto/rand" random number generator. It is +// intended to be used to seed pseudorandom number generators at package initialization. It panics +// if it encounters any errors. +func cryptoSeed() uint64 { + var b [8]byte + _, err := io.ReadFull(crand.Reader, b[:]) + if err != nil { + panic(fmt.Errorf("failed to read 8 bytes from a \"crypto/rand\".Reader: %v", err)) } -} -// Read generates len(p) random bytes and writes them into p. It always returns len(p) and a nil -// error. -func (lr *LockedRand) Read(p []byte) (int, error) { - lr.mu.Lock() - n, err := lr.r.Read(p) - lr.mu.Unlock() - return n, err -} - -// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). It -// panics if n <= 0. -func (lr *LockedRand) Intn(n int) int { - lr.mu.Lock() - x := lr.r.Intn(n) - lr.mu.Unlock() - return x -} - -// Shuffle pseudo-randomizes the order of elements. n is the number of elements. Shuffle panics if -// n < 0. swap swaps the elements with indexes i and j. -// -// Note that Shuffle locks the LockedRand, so shuffling large collections may adversely affect other -// concurrent calls. 
If many concurrent Shuffle and random value calls are required, consider using
-// the global "math/rand".Shuffle instead because it uses much more granular locking.
-func (lr *LockedRand) Shuffle(n int, swap func(i, j int)) {
-	lr.mu.Lock()
-	lr.r.Shuffle(n, swap)
-	lr.mu.Unlock()
+	return (uint64(b[0]) << 0) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) |
+		(uint64(b[4]) << 32) | (uint64(b[5]) << 40) | (uint64(b[6]) << 48) | (uint64(b[7]) << 56)
 }
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go b/vendor/go.mongodb.org/mongo-driver/internal/string_util.go
index db1e189..6cafa79 100644
--- a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go
+++ b/vendor/go.mongodb.org/mongo-driver/internal/string_util.go
@@ -33,7 +33,7 @@ func StringSliceFromRawValue(name string, val bson.RawValue) ([]string, error) {
 		return nil, err
 	}
 
-	var strs []string
+	strs := make([]string, 0, len(arrayValues))
 	for _, arrayVal := range arrayValues {
 		str, ok := arrayVal.StringValueOK()
 		if !ok {
diff --git a/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
new file mode 100644
index 0000000..78f1664
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go
@@ -0,0 +1,53 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package uuid
+
+import (
+	"io"
+
+	"go.mongodb.org/mongo-driver/internal/randutil"
+)
+
+// UUID represents a UUID.
+type UUID [16]byte
+
+// A source is a UUID generator that reads random values from an io.Reader.
+// It should be safe to use from multiple goroutines.
+type source struct {
+	random io.Reader
+}
+
+// new returns a random UUIDv4 with bytes read from the source's random number generator.
+func (s *source) new() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(s.random, uuid[:])
+	if err != nil {
+		return UUID{}, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
+
+// newSource returns a source that uses a pseudo-random number generator in the randutil package.
+// It is intended to be used to initialize the package-global UUID generator.
+func newSource() *source {
+	return &source{
+		random: randutil.NewLockedRand(),
+	}
+}
+
+// globalSource is a package-global pseudo-random UUID generator.
+var globalSource = newSource()
+
+// New returns a random UUIDv4. It uses a global pseudo-random number generator in randutil
+// at package initialization.
+//
+// New should not be used to generate cryptographically-secure random UUIDs.
+func New() (UUID, error) {
+	return globalSource.new()
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/address/addr.go b/vendor/go.mongodb.org/mongo-driver/mongo/address/addr.go
index 5655b34..fb6abbc 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/address/addr.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/address/addr.go
@@ -4,6 +4,7 @@
 // not use this file except in compliance with the License. You may obtain
 // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 
+// Package address provides structured representations of network addresses.
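The shift-and-OR chain in cryptoSeed above is simply a little-endian decode of eight random bytes. A sketch of the same function written against encoding/binary:

```go
package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	"io"
)

// cryptoSeed reads 8 bytes from crypto/rand; the manual
// (b[0]<<0 | b[1]<<8 | ...) construction above is equivalent to a
// little-endian uint64 decode.
func cryptoSeed() uint64 {
	var b [8]byte
	if _, err := io.ReadFull(crand.Reader, b[:]); err != nil {
		panic(fmt.Errorf("failed to read 8 bytes from a \"crypto/rand\".Reader: %v", err))
	}
	return binary.LittleEndian.Uint64(b[:])
}

func main() {
	fmt.Printf("seed: %#016x\n", cryptoSeed())
}
```

In the uuid.go hunk above, the two masked assignments are likewise the only structure imposed on the random bytes: byte 6's high nibble is pinned to 0100 (version 4) and byte 8's top two bits to 10 (the RFC 4122 variant).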
 package address // import "go.mongodb.org/mongo-driver/mongo/address"
 
 import (
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
index 0b7432f..966e43c 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go
@@ -1,3 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
 package mongo
 
 import (
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
index fb5c91a..2c58f22 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go
@@ -27,6 +27,7 @@ type bulkWriteBatch struct {
 
 // bulkWrite performs a bulk write operation
 type bulkWrite struct {
+	comment                  interface{}
 	ordered                  *bool
 	bypassDocumentValidation *bool
 	models                   []WriteModel
@@ -35,6 +36,7 @@ type bulkWrite struct {
 	selector                 description.ServerSelector
 	writeConcern             *writeconcern.WriteConcern
 	result                   BulkWriteResult
+	let                      interface{}
 }
 
 func (bw *bulkWrite) execute(ctx context.Context) error {
@@ -113,7 +115,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
 				batchErr.Labels = writeErr.Labels
 				batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
 			}
-			batchRes.InsertedCount = int64(res.N)
+			batchRes.InsertedCount = res.N
 		case *DeleteOneModel, *DeleteManyModel:
 			res, err := bw.runDelete(ctx, batch)
 			if err != nil {
@@ -125,7 +127,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
 				batchErr.Labels = writeErr.Labels
 				batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
 			}
-			batchRes.DeletedCount = int64(res.N)
+			batchRes.DeletedCount = res.N
 		case *ReplaceOneModel, *UpdateOneModel, *UpdateManyModel:
 			res, err := bw.runUpdate(ctx, batch)
 			if err != nil {
@@ -137,8 +139,8 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
 				batchErr.Labels = writeErr.Labels
 				batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
 			}
-			batchRes.MatchedCount = int64(res.N)
-			batchRes.ModifiedCount = int64(res.NModified)
+			batchRes.MatchedCount = res.N
+			batchRes.ModifiedCount = res.NModified
 			batchRes.UpsertedCount = int64(len(res.Upserted))
 			for _, upsert := range res.Upserted {
 				batchRes.UpsertedIDs[int64(batch.indexes[upsert.Index])] = upsert.ID
@@ -177,7 +179,14 @@ func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (opera
 		ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock).
 		Database(bw.collection.db.name).Collection(bw.collection.name).
 		Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).
- ServerAPI(bw.collection.client.serverAPI) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } if bw.bypassDocumentValidation != nil && *bw.bypassDocumentValidation { op = op.BypassDocumentValidation(*bw.bypassDocumentValidation) } @@ -227,7 +236,21 @@ func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ServerAPI(bw.collection.client.serverAPI) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } + if bw.let != nil { + let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + if err != nil { + return operation.DeleteResult{}, err + } + op = op.Let(let) + } if bw.ordered != nil { op = op.Ordered(*bw.ordered) } @@ -308,7 +331,21 @@ func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI) + ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } + if bw.let != nil { + let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + if err != nil { + return operation.UpdateResult{}, err + } + op = op.Let(let) + } if bw.ordered != nil { op = op.Ordered(*bw.ordered) } @@ -380,7 +417,6 @@ func createUpdateDoc( } updateDoc, _ = bsoncore.AppendDocumentEnd(updateDoc, uidx) - return updateDoc, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go index b4b8e3e..64f4589 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go @@ -152,7 +152,7 @@ func (rom *ReplaceOneModel) SetFilter(filter interface{}) *ReplaceOneModel { } // SetReplacement specifies a document that will be used to replace the selected document. It cannot be nil and cannot -// contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). func (rom *ReplaceOneModel) SetReplacement(rep interface{}) *ReplaceOneModel { rom.Replacement = rep return rom @@ -210,7 +210,7 @@ func (uom *UpdateOneModel) SetFilter(filter interface{}) *UpdateOneModel { } // SetUpdate specifies the modifications to be made to the selected document. The value must be a document containing -// update operators (https://docs.mongodb.com/manual/reference/operator/update/). 
It cannot be nil or empty. +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. func (uom *UpdateOneModel) SetUpdate(update interface{}) *UpdateOneModel { uom.Update = update return uom @@ -274,7 +274,7 @@ func (umm *UpdateManyModel) SetFilter(filter interface{}) *UpdateManyModel { } // SetUpdate specifies the modifications to be made to the selected documents. The value must be a document containing -// update operators (https://docs.mongodb.com/manual/reference/operator/update/). It cannot be nil or empty. +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. func (umm *UpdateManyModel) SetUpdate(update interface{}) *UpdateManyModel { umm.Update = update return umm diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index f2a194d..82d2b57 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -17,6 +17,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -63,27 +64,28 @@ var ( // ChangeStream is used to iterate over a stream of events. Each event can be decoded into a Go type via the Decode // method or accessed as raw BSON via the Current field. This type is not goroutine safe and must not be used // concurrently by multiple goroutines. For more information about change streams, see -// https://docs.mongodb.com/manual/changeStreams/. +// https://www.mongodb.com/docs/manual/changeStreams/. type ChangeStream struct { // Current is the BSON bytes of the current event. This property is only valid until the next call to Next or // TryNext. If continued access is required, a copy must be made. Current bson.Raw - aggregate *operation.Aggregate - pipelineSlice []bsoncore.Document - cursor changeStreamCursor - cursorOptions driver.CursorOptions - batch []bsoncore.Document - resumeToken bson.Raw - err error - sess *session.Client - client *Client - registry *bsoncodec.Registry - streamType StreamType - options *options.ChangeStreamOptions - selector description.ServerSelector - operationTime *primitive.Timestamp - wireVersion *description.VersionRange + aggregate *operation.Aggregate + pipelineSlice []bsoncore.Document + pipelineOptions map[string]bsoncore.Value + cursor changeStreamCursor + cursorOptions driver.CursorOptions + batch []bsoncore.Document + resumeToken bson.Raw + err error + sess *session.Client + client *Client + registry *bsoncodec.Registry + streamType StreamType + options *options.ChangeStreamOptions + selector description.ServerSelector + operationTime *primitive.Timestamp + wireVersion *description.VersionRange } type changeStreamConfig struct { @@ -131,11 +133,20 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in ReadPreference(config.readPreference).ReadConcern(config.readConcern). Deployment(cs.client.deployment).ClusterClock(cs.client.clock). CommandMonitor(cs.client.monitor).Session(cs.sess).ServerSelector(cs.selector).Retry(driver.RetryNone). 
- ServerAPI(cs.client.serverAPI).Crypt(config.crypt) + ServerAPI(cs.client.serverAPI).Crypt(config.crypt).Timeout(cs.client.timeout) if cs.options.Collation != nil { cs.aggregate.Collation(bsoncore.Document(cs.options.Collation.ToDocument())) } + if comment := cs.options.Comment; comment != nil { + cs.aggregate.Comment(*comment) + + commentVal, err := transformValue(cs.registry, comment, true, "comment") + if err != nil { + return nil, err + } + cs.cursorOptions.Comment = commentVal + } if cs.options.BatchSize != nil { cs.aggregate.BatchSize(*cs.options.BatchSize) cs.cursorOptions.BatchSize = *cs.options.BatchSize @@ -143,6 +154,37 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in if cs.options.MaxAwaitTime != nil { cs.cursorOptions.MaxTimeMS = int64(*cs.options.MaxAwaitTime / time.Millisecond) } + if cs.options.Custom != nil { + // Marshal all custom options before passing to the initial aggregate. Return + // any errors from Marshaling. + customOptions := make(map[string]bsoncore.Value) + for optionName, optionValue := range cs.options.Custom { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue) + if err != nil { + cs.err = err + closeImplicitSession(cs.sess) + return nil, cs.Err() + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + customOptions[optionName] = optionValueBSON + } + cs.aggregate.CustomOptions(customOptions) + } + if cs.options.CustomPipeline != nil { + // Marshal all custom pipeline options before building pipeline slice. Return + // any errors from Marshaling. + cs.pipelineOptions = make(map[string]bsoncore.Value) + for optionName, optionValue := range cs.options.CustomPipeline { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue) + if err != nil { + cs.err = err + closeImplicitSession(cs.sess) + return nil, cs.Err() + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + cs.pipelineOptions[optionName] = optionValueBSON + } + } switch cs.streamType { case ClientStream: @@ -198,7 +240,6 @@ func (cs *ChangeStream) createOperationDeployment(server driver.Server, connecti func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) error { var server driver.Server var conn driver.Connection - var err error if server, cs.err = cs.client.deployment.SelectServer(ctx, cs.selector); cs.err != nil { return cs.Err() @@ -212,9 +253,12 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Deployment(cs.createOperationDeployment(server, conn)) if resuming { - cs.replaceOptions(ctx, cs.wireVersion) + cs.replaceOptions(cs.wireVersion) - csOptDoc := cs.createPipelineOptionsDoc() + csOptDoc, err := cs.createPipelineOptionsDoc() + if err != nil { + return err + } pipIdx, pipDoc := bsoncore.AppendDocumentStart(nil) pipDoc = bsoncore.AppendDocumentElement(pipDoc, "$changeStream", csOptDoc) if pipDoc, cs.err = bsoncore.AppendDocumentEnd(pipDoc, pipIdx); cs.err != nil { @@ -229,48 +273,72 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Pipeline(plArr) } - if original := cs.aggregate.Execute(ctx); original != nil { - retryableRead := cs.client.retryReads && cs.wireVersion != nil && cs.wireVersion.Max >= 6 - if !retryableRead { - cs.err = replaceErrors(original) - return cs.err + // If no deadline is set on the passed-in context, cs.client.timeout is set, and context is not already + // a Timeout context, honor cs.client.timeout in new Timeout context 
for change stream operation execution + // and potential retry. + if _, deadlineSet := ctx.Deadline(); !deadlineSet && cs.client.timeout != nil && !internal.IsTimeoutContext(ctx) { + newCtx, cancelFunc := internal.MakeTimeoutContext(ctx, *cs.client.timeout) + // Redefine ctx to be the new timeout-derived context. + ctx = newCtx + // Cancel the timeout-derived context at the end of executeOperation to avoid a context leak. + defer cancelFunc() + } + + // Execute the aggregate, retrying on retryable errors once (1) if retryable reads are enabled and + // infinitely (-1) if context is a Timeout context. + var retries int + if cs.client.retryReads { + retries = 1 + } + if internal.IsTimeoutContext(ctx) { + retries = -1 + } + + var err error +AggregateExecuteLoop: + for { + err = cs.aggregate.Execute(ctx) + // If no error or no retries remain, do not retry. + if err == nil || retries == 0 { + break AggregateExecuteLoop } - cs.err = original - switch tt := original.(type) { + switch tt := err.(type) { case driver.Error: + // If error is not retryable, do not retry. if !tt.RetryableRead() { - break + break AggregateExecuteLoop } + // If error is retryable: subtract 1 from retries, redo server selection, checkout + // a connection, and restart loop. + retries-- server, err = cs.client.deployment.SelectServer(ctx, cs.selector) if err != nil { - break + break AggregateExecuteLoop } conn.Close() conn, err = server.Connection(ctx) if err != nil { - break + break AggregateExecuteLoop } defer conn.Close() - cs.wireVersion = conn.Description().WireVersion - if cs.wireVersion == nil || cs.wireVersion.Max < 6 { - break - } + // Update the wire version with data from the new connection. + cs.wireVersion = conn.Description().WireVersion + // Reset deployment. cs.aggregate.Deployment(cs.createOperationDeployment(server, conn)) - cs.err = cs.aggregate.Execute(ctx) - } - - if cs.err != nil { - cs.err = replaceErrors(cs.err) - return cs.Err() + default: + // Do not retry if error is not a driver error. 
+ break AggregateExecuteLoop } - } - cs.err = nil + if err != nil { + cs.err = replaceErrors(err) + return cs.err + } cr := cs.aggregate.ResultCursorResponse() cr.Server = server @@ -331,9 +399,10 @@ func (cs *ChangeStream) buildPipelineSlice(pipeline interface{}) error { cs.pipelineSlice = make([]bsoncore.Document, 0, val.Len()+1) csIdx, csDoc := bsoncore.AppendDocumentStart(nil) - csDocTemp := cs.createPipelineOptionsDoc() - if cs.err != nil { - return cs.err + + csDocTemp, err := cs.createPipelineOptionsDoc() + if err != nil { + return err } csDoc = bsoncore.AppendDocumentElement(csDoc, "$changeStream", csDocTemp) csDoc, cs.err = bsoncore.AppendDocumentEnd(csDoc, csIdx) @@ -355,7 +424,7 @@ func (cs *ChangeStream) buildPipelineSlice(pipeline interface{}) error { return cs.err } -func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { +func (cs *ChangeStream) createPipelineOptionsDoc() (bsoncore.Document, error) { plDocIdx, plDoc := bsoncore.AppendDocumentStart(nil) if cs.streamType == ClientStream { @@ -363,24 +432,34 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { } if cs.options.FullDocument != nil { - plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument)) + if *cs.options.FullDocument != options.Default { + plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument)) + } + } + + if cs.options.FullDocumentBeforeChange != nil { + plDoc = bsoncore.AppendStringElement(plDoc, "fullDocumentBeforeChange", string(*cs.options.FullDocumentBeforeChange)) } if cs.options.ResumeAfter != nil { var raDoc bsoncore.Document raDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.ResumeAfter, true, "resumeAfter") if cs.err != nil { - return nil + return nil, cs.err } plDoc = bsoncore.AppendDocumentElement(plDoc, "resumeAfter", raDoc) } + if cs.options.ShowExpandedEvents != nil { + plDoc = bsoncore.AppendBooleanElement(plDoc, "showExpandedEvents", *cs.options.ShowExpandedEvents) + } + if cs.options.StartAfter != nil { var saDoc bsoncore.Document saDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.StartAfter, true, "startAfter") if cs.err != nil { - return nil + return nil, cs.err } plDoc = bsoncore.AppendDocumentElement(plDoc, "startAfter", saDoc) @@ -390,11 +469,16 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { plDoc = bsoncore.AppendTimestampElement(plDoc, "startAtOperationTime", cs.options.StartAtOperationTime.T, cs.options.StartAtOperationTime.I) } + // Append custom pipeline options. 
+ for optionName, optionValue := range cs.pipelineOptions { + plDoc = bsoncore.AppendValueElement(plDoc, optionName, optionValue) + } + if plDoc, cs.err = bsoncore.AppendDocumentEnd(plDoc, plDocIdx); cs.err != nil { - return nil + return nil, cs.err } - return plDoc + return plDoc, nil } func (cs *ChangeStream) pipelineToBSON() (bsoncore.Document, error) { @@ -408,7 +492,7 @@ func (cs *ChangeStream) pipelineToBSON() (bsoncore.Document, error) { return pipelineArr, cs.err } -func (cs *ChangeStream) replaceOptions(ctx context.Context, wireVersion *description.VersionRange) { +func (cs *ChangeStream) replaceOptions(wireVersion *description.VersionRange) { // Cached resume token: use the resume token as the resumeAfter option and set no other resume options if cs.resumeToken != nil { cs.options.SetResumeAfter(cs.resumeToken) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go index 36c6e25..4dca59f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go @@ -8,7 +8,6 @@ package mongo import ( "context" - "time" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -36,8 +35,8 @@ func (c *changeStreamDeployment) Connection(context.Context) (driver.Connection, return c.conn, nil } -func (c *changeStreamDeployment) MinRTT() time.Duration { - return c.server.MinRTT() +func (c *changeStreamDeployment) RTTMonitor() driver.RTTMonitor { + return c.server.RTTMonitor() } func (c *changeStreamDeployment) ProcessError(err error, conn driver.Connection) driver.ProcessErrorResult { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index 63630eb..d80fd1d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -8,15 +8,16 @@ package mongo import ( "context" - "crypto/tls" "errors" "fmt" - "strings" + "net/http" "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/uuid" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -24,15 +25,17 @@ import ( "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" - "go.mongodb.org/mongo-driver/x/mongo/driver/auth" - "go.mongodb.org/mongo-driver/x/mongo/driver/ocsp" + "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" + mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" "go.mongodb.org/mongo-driver/x/mongo/driver/session" "go.mongodb.org/mongo-driver/x/mongo/driver/topology" - "go.mongodb.org/mongo-driver/x/mongo/driver/uuid" ) -const defaultLocalThreshold = 15 * time.Millisecond +const ( + defaultLocalThreshold = 15 * time.Millisecond + defaultMaxPoolSize = 100 +) var ( // keyVaultCollOpts specifies options used to communicate with the key vault collection @@ -48,29 +51,31 @@ var ( // The Client type opens and closes connections automatically and maintains a pool of idle connections. For // connection pool configuration options, see documentation for the ClientOptions type in the mongo/options package. 
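The hunks above thread three new knobs through the change stream's initial aggregate: the client-level timeout (which the labeled retry loop converts into a Timeout context with retries allowed until the deadline), a comment forwarded to both the aggregate and its cursor, and the Custom/CustomPipeline escape hatches for otherwise-unsupported options. A minimal sketch of driving them from the public API, assuming the corresponding options.Client().SetTimeout and options.ChangeStream().SetComment/SetCustom/SetCustomPipeline setters that accompany these internals; the URI and option values are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()

	// SetTimeout enables the client-level timeout that executeOperation above
	// turns into a timeout-derived context (retries = -1 while time remains).
	client, err := mongo.Connect(ctx, options.Client().
		ApplyURI("mongodb://localhost:27017"). // placeholder URI
		SetTimeout(10*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Disconnect(ctx) }()

	// Comment reaches both the aggregate and the getMore cursor options;
	// Custom adds top-level aggregate command fields, while CustomPipeline
	// fields are merged into the $changeStream stage document by the
	// cs.pipelineOptions loop above.
	csOpts := options.ChangeStream().
		SetComment("audit-watcher").
		SetCustom(bson.M{"allowDiskUse": true}).
		SetCustomPipeline(bson.M{"showMigrationEvents": true})

	stream, err := client.Database("db").Collection("coll").Watch(ctx, mongo.Pipeline{}, csOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close(ctx)

	for stream.Next(ctx) {
		log.Println(stream.Current)
	}
}
```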
type Client struct { - id uuid.UUID - topologyOptions []topology.Option - deployment driver.Deployment - localThreshold time.Duration - retryWrites bool - retryReads bool - clock *session.ClusterClock - readPreference *readpref.ReadPref - readConcern *readconcern.ReadConcern - writeConcern *writeconcern.WriteConcern - registry *bsoncodec.Registry - monitor *event.CommandMonitor - serverAPI *driver.ServerAPIOptions - serverMonitor *event.ServerMonitor - sessionPool *session.Pool + id uuid.UUID + deployment driver.Deployment + localThreshold time.Duration + retryWrites bool + retryReads bool + clock *session.ClusterClock + readPreference *readpref.ReadPref + readConcern *readconcern.ReadConcern + writeConcern *writeconcern.WriteConcern + registry *bsoncodec.Registry + monitor *event.CommandMonitor + serverAPI *driver.ServerAPIOptions + serverMonitor *event.ServerMonitor + sessionPool *session.Pool + timeout *time.Duration + httpClient *http.Client // client-side encryption fields - keyVaultClientFLE *Client - keyVaultCollFLE *Collection - mongocryptdFLE *mcryptClient - cryptFLE driver.Crypt - metadataClientFLE *Client - internalClientFLE *Client + keyVaultClientFLE *Client + keyVaultCollFLE *Collection + mongocryptdFLE *mongocryptdClient + cryptFLE driver.Crypt + metadataClientFLE *Client + internalClientFLE *Client + encryptedFieldsMap map[string]interface{} } // Connect creates a new Client and then initializes it using the Connect method. This is equivalent to calling @@ -129,13 +134,84 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { } client := &Client{id: id} - err = client.configure(clientOpt) + // ClusterClock + client.clock = new(session.ClusterClock) + + // LocalThreshold + client.localThreshold = defaultLocalThreshold + if clientOpt.LocalThreshold != nil { + client.localThreshold = *clientOpt.LocalThreshold + } + // Monitor + if clientOpt.Monitor != nil { + client.monitor = clientOpt.Monitor + } + // ServerMonitor + if clientOpt.ServerMonitor != nil { + client.serverMonitor = clientOpt.ServerMonitor + } + // ReadConcern + client.readConcern = readconcern.New() + if clientOpt.ReadConcern != nil { + client.readConcern = clientOpt.ReadConcern + } + // ReadPreference + client.readPreference = readpref.Primary() + if clientOpt.ReadPreference != nil { + client.readPreference = clientOpt.ReadPreference + } + // Registry + client.registry = bson.DefaultRegistry + if clientOpt.Registry != nil { + client.registry = clientOpt.Registry + } + // RetryWrites + client.retryWrites = true // retry writes on by default + if clientOpt.RetryWrites != nil { + client.retryWrites = *clientOpt.RetryWrites + } + client.retryReads = true + if clientOpt.RetryReads != nil { + client.retryReads = *clientOpt.RetryReads + } + // Timeout + client.timeout = clientOpt.Timeout + client.httpClient = clientOpt.HTTPClient + // WriteConcern + if clientOpt.WriteConcern != nil { + client.writeConcern = clientOpt.WriteConcern + } + // AutoEncryptionOptions + if clientOpt.AutoEncryptionOptions != nil { + if err := client.configureAutoEncryption(clientOpt); err != nil { + return nil, err + } + } else { + client.cryptFLE = clientOpt.Crypt + } + + // Deployment + if clientOpt.Deployment != nil { + client.deployment = clientOpt.Deployment + } + + // Set default options + if clientOpt.MaxPoolSize == nil { + clientOpt.SetMaxPoolSize(defaultMaxPoolSize) + } + if err != nil { return nil, err } + cfg, err := topology.NewConfig(clientOpt, client.clock) + if err != nil { + return nil, err + } + client.serverAPI = 
topology.ServerAPIFromServerOptions(cfg.ServerOpts) + if client.deployment == nil { - client.deployment, err = topology.New(client.topologyOptions...) + client.deployment, err = topology.New(cfg) if err != nil { return nil, replaceErrors(err) } @@ -205,6 +281,10 @@ func (c *Client) Disconnect(ctx context.Context) error { ctx = context.Background() } + if c.httpClient == internal.DefaultHTTPClient { + defer internal.CloseIdleHTTPConnections(c.httpClient) + } + c.endSessions(ctx) if c.mongocryptdFLE != nil { if err := c.mongocryptdFLE.disconnect(ctx); err != nil { @@ -235,6 +315,7 @@ func (c *Client) Disconnect(ctx context.Context) error { if disconnector, ok := c.deployment.(driver.Disconnector); ok { return replaceErrors(disconnector.Disconnect(ctx)) } + return nil } @@ -271,6 +352,9 @@ func (c *Client) Ping(ctx context.Context, rp *readpref.ReadPref) error { // StartSession does not actually communicate with the server and will not error if the client is // disconnected. // +// StartSession is safe to call from multiple goroutines concurrently. However, Sessions returned by StartSession are +// not safe for concurrent use by multiple goroutines. +// // If the DefaultReadConcern, DefaultWriteConcern, or DefaultReadPreference options are not set, the client's read // concern, write concern, or read preference will be used, respectively. func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) { @@ -347,368 +431,31 @@ func (c *Client) endSessions(ctx context.Context) { } } -func (c *Client) configure(opts *options.ClientOptions) error { - if err := opts.Validate(); err != nil { - return err - } - - var connOpts []topology.ConnectionOption - var serverOpts []topology.ServerOption - var topologyOpts []topology.Option - - // TODO(GODRIVER-814): Add tests for topology, server, and connection related options. - - // ServerAPIOptions need to be handled early as other client and server options below reference - // c.serverAPI and serverOpts.serverAPI. - if opts.ServerAPIOptions != nil { - // convert passed in options to driver form for client. - c.serverAPI = convertToDriverAPIOptions(opts.ServerAPIOptions) - - serverOpts = append(serverOpts, topology.WithServerAPI(func(*driver.ServerAPIOptions) *driver.ServerAPIOptions { - return c.serverAPI - })) - } - - // ClusterClock - c.clock = new(session.ClusterClock) - - // Pass down URI, SRV service name, and SRV max hosts so topology can poll SRV records correctly. - topologyOpts = append(topologyOpts, - topology.WithURI(func(uri string) string { return opts.GetURI() }), - topology.WithSRVServiceName(func(srvName string) string { - if opts.SRVServiceName != nil { - return *opts.SRVServiceName - } - return "" - }), - topology.WithSRVMaxHosts(func(srvMaxHosts int) int { - if opts.SRVMaxHosts != nil { - return *opts.SRVMaxHosts - } - return 0 - }), - ) - - // AppName - var appName string - if opts.AppName != nil { - appName = *opts.AppName - - serverOpts = append(serverOpts, topology.WithServerAppName(func(string) string { - return appName - })) - } - // Compressors & ZlibLevel - var comps []string - if len(opts.Compressors) > 0 { - comps = opts.Compressors - - connOpts = append(connOpts, topology.WithCompressors( - func(compressors []string) []string { - return append(compressors, comps...) 
- }, - )) - - for _, comp := range comps { - switch comp { - case "zlib": - connOpts = append(connOpts, topology.WithZlibLevel(func(level *int) *int { - return opts.ZlibLevel - })) - case "zstd": - connOpts = append(connOpts, topology.WithZstdLevel(func(level *int) *int { - return opts.ZstdLevel - })) - } - } - - serverOpts = append(serverOpts, topology.WithCompressionOptions( - func(opts ...string) []string { return append(opts, comps...) }, - )) - } - - var loadBalanced bool - if opts.LoadBalanced != nil { - loadBalanced = *opts.LoadBalanced - } - - // Handshaker - var handshaker = func(driver.Handshaker) driver.Handshaker { - return operation.NewHello().AppName(appName).Compressors(comps).ClusterClock(c.clock). - ServerAPI(c.serverAPI).LoadBalanced(loadBalanced) - } - // Auth & Database & Password & Username - if opts.Auth != nil { - cred := &auth.Cred{ - Username: opts.Auth.Username, - Password: opts.Auth.Password, - PasswordSet: opts.Auth.PasswordSet, - Props: opts.Auth.AuthMechanismProperties, - Source: opts.Auth.AuthSource, - } - mechanism := opts.Auth.AuthMechanism - - if len(cred.Source) == 0 { - switch strings.ToUpper(mechanism) { - case auth.MongoDBX509, auth.GSSAPI, auth.PLAIN: - cred.Source = "$external" - default: - cred.Source = "admin" - } - } - - authenticator, err := auth.CreateAuthenticator(mechanism, cred) - if err != nil { - return err - } - - handshakeOpts := &auth.HandshakeOptions{ - AppName: appName, - Authenticator: authenticator, - Compressors: comps, - ClusterClock: c.clock, - ServerAPI: c.serverAPI, - LoadBalanced: loadBalanced, - } - if mechanism == "" { - // Required for SASL mechanism negotiation during handshake - handshakeOpts.DBUser = cred.Source + "." + cred.Username - } - if opts.AuthenticateToAnything != nil && *opts.AuthenticateToAnything { - // Authenticate arbiters - handshakeOpts.PerformAuthentication = func(serv description.Server) bool { - return true - } - } - - handshaker = func(driver.Handshaker) driver.Handshaker { - return auth.Handshaker(nil, handshakeOpts) - } - } - connOpts = append(connOpts, topology.WithHandshaker(handshaker)) - // ConnectTimeout - if opts.ConnectTimeout != nil { - serverOpts = append(serverOpts, topology.WithHeartbeatTimeout( - func(time.Duration) time.Duration { return *opts.ConnectTimeout }, - )) - connOpts = append(connOpts, topology.WithConnectTimeout( - func(time.Duration) time.Duration { return *opts.ConnectTimeout }, - )) - } - // Dialer - if opts.Dialer != nil { - connOpts = append(connOpts, topology.WithDialer( - func(topology.Dialer) topology.Dialer { return opts.Dialer }, - )) - } - // Direct - if opts.Direct != nil && *opts.Direct { - topologyOpts = append(topologyOpts, topology.WithMode( - func(topology.MonitorMode) topology.MonitorMode { return topology.SingleMode }, - )) - } - // HeartbeatInterval - if opts.HeartbeatInterval != nil { - serverOpts = append(serverOpts, topology.WithHeartbeatInterval( - func(time.Duration) time.Duration { return *opts.HeartbeatInterval }, - )) - } - // Hosts - hosts := []string{"localhost:27017"} // default host - if len(opts.Hosts) > 0 { - hosts = opts.Hosts - } - topologyOpts = append(topologyOpts, topology.WithSeedList( - func(...string) []string { return hosts }, - )) - // LocalThreshold - c.localThreshold = defaultLocalThreshold - if opts.LocalThreshold != nil { - c.localThreshold = *opts.LocalThreshold - } - // MaxConIdleTime - if opts.MaxConnIdleTime != nil { - connOpts = append(connOpts, topology.WithIdleTimeout( - func(time.Duration) time.Duration { return 
*opts.MaxConnIdleTime }, - )) - } - // MaxPoolSize - if opts.MaxPoolSize != nil { - serverOpts = append( - serverOpts, - topology.WithMaxConnections(func(uint64) uint64 { return *opts.MaxPoolSize }), - ) - } - // MinPoolSize - if opts.MinPoolSize != nil { - serverOpts = append( - serverOpts, - topology.WithMinConnections(func(uint64) uint64 { return *opts.MinPoolSize }), - ) - } - // MaxConnecting - if opts.MaxConnecting != nil { - serverOpts = append( - serverOpts, - topology.WithMaxConnecting(func(uint64) uint64 { return *opts.MaxConnecting }), - ) - } - // PoolMonitor - if opts.PoolMonitor != nil { - serverOpts = append( - serverOpts, - topology.WithConnectionPoolMonitor(func(*event.PoolMonitor) *event.PoolMonitor { return opts.PoolMonitor }), - ) - } - // Monitor - if opts.Monitor != nil { - c.monitor = opts.Monitor - connOpts = append(connOpts, topology.WithMonitor( - func(*event.CommandMonitor) *event.CommandMonitor { return opts.Monitor }, - )) - } - // ServerMonitor - if opts.ServerMonitor != nil { - c.serverMonitor = opts.ServerMonitor - serverOpts = append( - serverOpts, - topology.WithServerMonitor(func(*event.ServerMonitor) *event.ServerMonitor { return opts.ServerMonitor }), - ) - - topologyOpts = append( - topologyOpts, - topology.WithTopologyServerMonitor(func(*event.ServerMonitor) *event.ServerMonitor { return opts.ServerMonitor }), - ) - } - // ReadConcern - c.readConcern = readconcern.New() - if opts.ReadConcern != nil { - c.readConcern = opts.ReadConcern - } - // ReadPreference - c.readPreference = readpref.Primary() - if opts.ReadPreference != nil { - c.readPreference = opts.ReadPreference - } - // Registry - c.registry = bson.DefaultRegistry - if opts.Registry != nil { - c.registry = opts.Registry - } - // ReplicaSet - if opts.ReplicaSet != nil { - topologyOpts = append(topologyOpts, topology.WithReplicaSetName( - func(string) string { return *opts.ReplicaSet }, - )) - } - // RetryWrites - c.retryWrites = true // retry writes on by default - if opts.RetryWrites != nil { - c.retryWrites = *opts.RetryWrites - } - c.retryReads = true - if opts.RetryReads != nil { - c.retryReads = *opts.RetryReads - } - // ServerSelectionTimeout - if opts.ServerSelectionTimeout != nil { - topologyOpts = append(topologyOpts, topology.WithServerSelectionTimeout( - func(time.Duration) time.Duration { return *opts.ServerSelectionTimeout }, - )) - } - // SocketTimeout - if opts.SocketTimeout != nil { - connOpts = append( - connOpts, - topology.WithReadTimeout(func(time.Duration) time.Duration { return *opts.SocketTimeout }), - topology.WithWriteTimeout(func(time.Duration) time.Duration { return *opts.SocketTimeout }), - ) - } - // TLSConfig - if opts.TLSConfig != nil { - connOpts = append(connOpts, topology.WithTLSConfig( - func(*tls.Config) *tls.Config { - return opts.TLSConfig - }, - )) - } - // WriteConcern - if opts.WriteConcern != nil { - c.writeConcern = opts.WriteConcern - } - // AutoEncryptionOptions - if opts.AutoEncryptionOptions != nil { - if err := c.configureAutoEncryption(opts); err != nil { - return err - } - } else { - c.cryptFLE = opts.Crypt - } - - // OCSP cache - ocspCache := ocsp.NewCache() - connOpts = append( - connOpts, - topology.WithOCSPCache(func(ocsp.Cache) ocsp.Cache { return ocspCache }), - ) - - // Disable communication with external OCSP responders. 
- if opts.DisableOCSPEndpointCheck != nil { - connOpts = append( - connOpts, - topology.WithDisableOCSPEndpointCheck(func(bool) bool { return *opts.DisableOCSPEndpointCheck }), - ) - } - - // LoadBalanced - if opts.LoadBalanced != nil { - topologyOpts = append( - topologyOpts, - topology.WithLoadBalanced(func(bool) bool { return *opts.LoadBalanced }), - ) - serverOpts = append( - serverOpts, - topology.WithServerLoadBalanced(func(bool) bool { return *opts.LoadBalanced }), - ) - connOpts = append( - connOpts, - topology.WithConnectionLoadBalanced(func(bool) bool { return *opts.LoadBalanced }), - ) - } - - serverOpts = append( - serverOpts, - topology.WithClock(func(*session.ClusterClock) *session.ClusterClock { return c.clock }), - topology.WithConnectionOptions(func(...topology.ConnectionOption) []topology.ConnectionOption { return connOpts }), - ) - c.topologyOptions = append(topologyOpts, topology.WithServerOptions( - func(...topology.ServerOption) []topology.ServerOption { return serverOpts }, - )) - - // Deployment - if opts.Deployment != nil { - // topology options: WithSeedlist, WithURI, WithSRVServiceName and WithSRVMaxHosts - // server options: WithClock and WithConnectionOptions - if len(serverOpts) > 2 || len(topologyOpts) > 4 { - return errors.New("cannot specify topology or server options with a deployment") - } - c.deployment = opts.Deployment - } - - return nil -} - func (c *Client) configureAutoEncryption(clientOpts *options.ClientOptions) error { + c.encryptedFieldsMap = clientOpts.AutoEncryptionOptions.EncryptedFieldsMap if err := c.configureKeyVaultClientFLE(clientOpts); err != nil { return err } if err := c.configureMetadataClientFLE(clientOpts); err != nil { return err } - if err := c.configureMongocryptdClientFLE(clientOpts.AutoEncryptionOptions); err != nil { + + mc, err := c.newMongoCrypt(clientOpts.AutoEncryptionOptions) + if err != nil { + return err + } + + // If the crypt_shared library was loaded successfully, signal to the mongocryptd client creator + // that it can bypass spawning mongocryptd. 
+ cryptSharedLibAvailable := mc.CryptSharedLibVersionString() != "" + mongocryptdFLE, err := newMongocryptdClient(cryptSharedLibAvailable, clientOpts.AutoEncryptionOptions) + if err != nil { return err } - return c.configureCryptFLE(clientOpts.AutoEncryptionOptions) + c.mongocryptdFLE = mongocryptdFLE + + c.configureCryptFLE(mc, clientOpts.AutoEncryptionOptions) + return nil } func (c *Client) getOrCreateInternalClient(clientOpts *options.ClientOptions) (*Client, error) { @@ -763,32 +510,90 @@ func (c *Client) configureMetadataClientFLE(clientOpts *options.ClientOptions) e return err } -func (c *Client) configureMongocryptdClientFLE(opts *options.AutoEncryptionOptions) error { - var err error - c.mongocryptdFLE, err = newMcryptClient(opts) - return err -} - -func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { +func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt.MongoCrypt, error) { // convert schemas in SchemaMap to bsoncore documents cryptSchemaMap := make(map[string]bsoncore.Document) for k, v := range opts.SchemaMap { schema, err := transformBsoncoreDocument(c.registry, v, true, "schemaMap") if err != nil { - return err + return nil, err } cryptSchemaMap[k] = schema } + + // convert schemas in EncryptedFieldsMap to bsoncore documents + cryptEncryptedFieldsMap := make(map[string]bsoncore.Document) + for k, v := range opts.EncryptedFieldsMap { + encryptedFields, err := transformBsoncoreDocument(c.registry, v, true, "encryptedFieldsMap") + if err != nil { + return nil, err + } + cryptEncryptedFieldsMap[k] = encryptedFields + } + kmsProviders, err := transformBsoncoreDocument(c.registry, opts.KmsProviders, true, "kmsProviders") if err != nil { - return fmt.Errorf("error creating KMS providers document: %v", err) + return nil, fmt.Errorf("error creating KMS providers document: %v", err) } - // configure options - var bypass bool - if opts.BypassAutoEncryption != nil { - bypass = *opts.BypassAutoEncryption + // Set the crypt_shared library override path from the "cryptSharedLibPath" extra option if one + // was set. + cryptSharedLibPath := "" + if val, ok := opts.ExtraOptions["cryptSharedLibPath"]; ok { + str, ok := val.(string) + if !ok { + return nil, fmt.Errorf( + `expected AutoEncryption extra option "cryptSharedLibPath" to be a string, but is a %T`, val) + } + cryptSharedLibPath = str + } + + // Explicitly disable loading the crypt_shared library if requested. Note that this is ONLY + // intended for use from tests; there is no supported public API for explicitly disabling + // loading the crypt_shared library. + cryptSharedLibDisabled := false + if v, ok := opts.ExtraOptions["__cryptSharedLibDisabledForTestOnly"]; ok { + cryptSharedLibDisabled = v.(bool) + } + + bypassAutoEncryption := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis + + mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). + SetKmsProviders(kmsProviders). + SetLocalSchemaMap(cryptSchemaMap). + SetBypassQueryAnalysis(bypassQueryAnalysis). + SetEncryptedFieldsMap(cryptEncryptedFieldsMap). + SetCryptSharedLibDisabled(cryptSharedLibDisabled || bypassAutoEncryption). 
+ SetCryptSharedLibOverridePath(cryptSharedLibPath)) + if err != nil { + return nil, err + } + + var cryptSharedLibRequired bool + if val, ok := opts.ExtraOptions["cryptSharedLibRequired"]; ok { + b, ok := val.(bool) + if !ok { + return nil, fmt.Errorf( + `expected AutoEncryption extra option "cryptSharedLibRequired" to be a bool, but is a %T`, val) + } + cryptSharedLibRequired = b } + + // If the "cryptSharedLibRequired" extra option is set to true, check the MongoCrypt version + // string to confirm that the library was successfully loaded. If the version string is empty, + // return an error indicating that we couldn't load the crypt_shared library. + if cryptSharedLibRequired && mc.CryptSharedLibVersionString() == "" { + return nil, errors.New( + `AutoEncryption extra option "cryptSharedLibRequired" is true, but we failed to load the crypt_shared library`) + } + + return mc, nil +} + +//nolint:unused // the unused linter thinks that this function is unreachable because "c.newMongoCrypt" always panics without the "cse" build tag set. +func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts *options.AutoEncryptionOptions) { + bypass := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption kr := keyRetriever{coll: c.keyVaultCollFLE} var cir collInfoRetriever // If bypass is true, c.metadataClientFLE is nil and the collInfoRetriever @@ -798,40 +603,25 @@ func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { cir = collInfoRetriever{client: c.metadataClientFLE} } - cryptOpts := &driver.CryptOptions{ + c.cryptFLE = driver.NewCrypt(&driver.CryptOptions{ + MongoCrypt: mc, CollInfoFn: cir.cryptCollInfo, KeyFn: kr.cryptKeys, MarkFn: c.mongocryptdFLE.markCommand, - KmsProviders: kmsProviders, TLSConfig: opts.TLSConfig, + HTTPClient: opts.HTTPClient, BypassAutoEncryption: bypass, - SchemaMap: cryptSchemaMap, - } - - c.cryptFLE, err = driver.NewCrypt(cryptOpts) - return err + }) } // validSession returns an error if the session doesn't belong to the client func (c *Client) validSession(sess *session.Client) error { - if sess != nil && !uuid.Equal(sess.ClientID, c.id) { + if sess != nil && sess.ClientID != c.id { return ErrWrongClient } return nil } -// convertToDriverAPIOptions converts a options.ServerAPIOptions instance to a driver.ServerAPIOptions. -func convertToDriverAPIOptions(s *options.ServerAPIOptions) *driver.ServerAPIOptions { - driverOpts := driver.NewServerAPIOptions(string(s.ServerAPIVersion)) - if s.Strict != nil { - driverOpts.SetStrict(*s.Strict) - } - if s.DeprecationErrors != nil { - driverOpts.SetDeprecationErrors(*s.DeprecationErrors) - } - return driverOpts -} - // Database returns a handle for a database with the given name configured with the given DatabaseOptions. func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Database { return newDatabase(c, name, opts...) @@ -843,9 +633,9 @@ func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Databa // databases are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include // all databases. // -// The opts paramter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). +// The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. 
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/. func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) { if ctx == nil { ctx = context.Background() @@ -885,7 +675,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... op := operation.NewListDatabases(filterDoc). Session(sess).ReadPreference(c.readPreference).CommandMonitor(c.monitor). ServerSelector(selector).ClusterClock(c.clock).Database("admin").Deployment(c.deployment).Crypt(c.cryptFLE). - ServerAPI(c.serverAPI) + ServerAPI(c.serverAPI).Timeout(c.timeout) if ldo.NameOnly != nil { op = op.NameOnly(*ldo.NameOnly) @@ -918,7 +708,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... // The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions // documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/. func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) ([]string, error) { opts = append(opts, options.ListDatabases().SetNameOnly(true)) @@ -939,6 +729,9 @@ func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts // SessionContext must be used as the Context parameter for any operations in the fn callback that should be executed // under the session. // +// WithSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the +// WithSession callback function is not safe for concurrent use by multiple goroutines. +// // If the ctx parameter already contains a Session, that Session will be replaced with the one provided. // // Any error returned by the fn callback will be returned without any modifications. @@ -951,6 +744,9 @@ func WithSession(ctx context.Context, sess Session, fn func(SessionContext) erro // be executed under a session. After the callback returns, the created Session is ended, meaning that any in-progress // transactions started by fn will be aborted even if fn returns an error. // +// UseSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the +// UseSession callback function is not safe for concurrent use by multiple goroutines. +// // If the ctx parameter already contains a Session, that Session will be replaced with the newly created one. // // Any error returned by the fn callback will be returned without any modifications. @@ -959,6 +755,9 @@ func (c *Client) UseSession(ctx context.Context, fn func(SessionContext) error) } // UseSessionWithOptions operates like UseSession but uses the given SessionOptions to create the Session. +// +// UseSessionWithOptions is safe to call from multiple goroutines concurrently. However, the SessionContext passed to +// the UseSessionWithOptions callback function is not safe for concurrent use by multiple goroutines. 
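newMongoCrypt above converts the per-collection EncryptedFieldsMap and honors the cryptSharedLibPath / cryptSharedLibRequired extra options. A sketch of opting in from client options, assuming a build with the cse tag and libmongocrypt available; the key material, key vault namespace, library path, and encryptedFields shape are all placeholders:

```go
package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()

	localKey := make([]byte, 96) // placeholder; load a securely stored 96-byte key

	aeOpts := options.AutoEncryption().
		SetKeyVaultNamespace("encryption.__keyVault").
		SetKmsProviders(map[string]map[string]interface{}{
			"local": {"key": localKey},
		}).
		// Queryable Encryption: per-collection encryptedFields documents,
		// converted by newMongoCrypt via transformBsoncoreDocument.
		SetEncryptedFieldsMap(map[string]interface{}{
			"db.coll": bson.M{"fields": bson.A{}}, // schematic placeholder
		}).
		// Extra options recognized by newMongoCrypt; the path is a placeholder.
		SetExtraOptions(map[string]interface{}{
			"cryptSharedLibPath":     "/usr/lib/mongo_crypt_v1.so",
			"cryptSharedLibRequired": true, // error if crypt_shared fails to load
		})

	client, err := mongo.Connect(ctx, options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetAutoEncryptionOptions(aeOpts))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Disconnect(ctx) }()
}
```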
func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.SessionOptions, fn func(SessionContext) error) error { defaultSess, err := c.StartSession(opts) if err != nil { @@ -970,13 +769,13 @@ func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.Sessio } // Watch returns a change stream for all changes on the deployment. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The client must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be -// nil or empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for a list +// nil or empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for a list // of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the mongo.Pipeline{} // type can be used. // @@ -1003,7 +802,15 @@ func (c *Client) Watch(ctx context.Context, pipeline interface{}, // NumberSessionsInProgress returns the number of sessions that have been started for this client but have not been // closed (i.e. EndSession has not been called). func (c *Client) NumberSessionsInProgress() int { - return c.sessionPool.CheckedOut() + // The underlying session pool uses an int64 for checkedOut to allow atomic + // access. We convert to an int here to maintain backward compatibility with + // older versions of the driver that did not atomically access checkedOut. + return int(c.sessionPool.CheckedOut()) +} + +// Timeout returns the timeout set for this client. +func (c *Client) Timeout() *time.Duration { + return c.timeout } func (c *Client) createBaseCursorOptions() driver.CursorOptions { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go index fe4646b..59c550b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go @@ -17,7 +17,8 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" - cryptOpts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" + "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" + mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" ) // ClientEncryption is used to create data keys and explicitly encrypt and decrypt BSON values. @@ -47,36 +48,57 @@ func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncrypti return nil, fmt.Errorf("error creating KMS providers map: %v", err) } + mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). + SetKmsProviders(kmsProviders). + // Explicitly disable loading the crypt_shared library for the Crypt used for + // ClientEncryption because it's only needed for AutoEncryption and we don't expect users to + // have the crypt_shared library installed if they're using ClientEncryption. 
+ SetCryptSharedLibDisabled(true)) + if err != nil { + return nil, err + } + // create Crypt kr := keyRetriever{coll: ce.keyVaultColl} cir := collInfoRetriever{client: ce.keyVaultClient} - ce.crypt, err = driver.NewCrypt(&driver.CryptOptions{ - KeyFn: kr.cryptKeys, - CollInfoFn: cir.cryptCollInfo, - KmsProviders: kmsProviders, - TLSConfig: ceo.TLSConfig, + ce.crypt = driver.NewCrypt(&driver.CryptOptions{ + MongoCrypt: mc, + KeyFn: kr.cryptKeys, + CollInfoFn: cir.cryptCollInfo, + TLSConfig: ceo.TLSConfig, + HTTPClient: ceo.HTTPClient, }) - if err != nil { - return nil, err - } return ce, nil } -// CreateDataKey creates a new key document and inserts it into the key vault collection. Returns the _id of the -// created document. -func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string, opts ...*options.DataKeyOptions) (primitive.Binary, error) { - // translate opts to cryptOpts.DataKeyOptions +// AddKeyAltName adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the +// given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. +func (ce *ClientEncryption) AddKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + keyAltNameDoc := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build() + update := bsoncore.NewDocumentBuilder().AppendDocument("$addToSet", keyAltNameDoc).Build() + return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update) +} + +// CreateDataKey creates a new key document and inserts into the key vault collection. Returns the _id of the created +// document as a UUID (BSON binary subtype 0x04). +func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string, + opts ...*options.DataKeyOptions) (primitive.Binary, error) { + + // translate opts to mcopts.DataKeyOptions dko := options.MergeDataKeyOptions(opts...) - co := cryptOpts.DataKey().SetKeyAltNames(dko.KeyAltNames) + co := mcopts.DataKey().SetKeyAltNames(dko.KeyAltNames) if dko.MasterKey != nil { keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, dko.MasterKey, true, "masterKey") if err != nil { return primitive.Binary{}, err } - co.SetMasterKey(keyDoc) } + if dko.KeyMaterial != nil { + co.SetKeyMaterial(dko.KeyMaterial) + } // create data key document dataKeyDoc, err := ce.crypt.CreateDataKey(ctx, kmsProvider, co) @@ -95,9 +117,11 @@ func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider strin } // Encrypt encrypts a BSON value with the given key and algorithm. Returns an encrypted value (BSON binary of subtype 6). -func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, opts ...*options.EncryptOptions) (primitive.Binary, error) { +func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, + opts ...*options.EncryptOptions) (primitive.Binary, error) { + eo := options.MergeEncryptOptions(opts...) 
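The new AddKeyAltName helper and the KeyMaterial-aware CreateDataKey above can be exercised as in this sketch against a local KMS provider (same cse build-tag caveat; the URI, key material, and key names are placeholders):

```go
package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	ctx := context.Background()

	keyVaultClient, err := mongo.Connect(ctx, options.Client().
		ApplyURI("mongodb://localhost:27017")) // placeholder URI
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = keyVaultClient.Disconnect(ctx) }()

	localKey := make([]byte, 96) // placeholder; use a securely stored 96-byte key
	ce, err := mongo.NewClientEncryption(keyVaultClient, options.ClientEncryption().
		SetKeyVaultNamespace("encryption.__keyVault").
		SetKmsProviders(map[string]map[string]interface{}{"local": {"key": localKey}}))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = ce.Close(ctx) }()

	// CreateDataKey returns the new key's _id as a UUID (binary subtype 0x04);
	// SetKeyMaterial is also available for caller-provided key material.
	keyID, err := ce.CreateDataKey(ctx, "local", options.DataKey().
		SetKeyAltNames([]string{"appKey"}))
	if err != nil {
		log.Fatal(err)
	}

	// AddKeyAltName $addToSet-s another alternate name onto the key document
	// and returns the previous version of the document.
	if res := ce.AddKeyAltName(ctx, keyID, "appKeyV2"); res.Err() != nil {
		log.Fatal(res.Err())
	}
}
```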
- transformed := cryptOpts.ExplicitEncryption() + transformed := mcopts.ExplicitEncryption() if eo.KeyID != nil { transformed.SetKeyID(*eo.KeyID) } @@ -105,6 +129,11 @@ func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, opts transformed.SetKeyAltName(*eo.KeyAltName) } transformed.SetAlgorithm(eo.Algorithm) + transformed.SetQueryType(eo.QueryType) + + if eo.ContentionFactor != nil { + transformed.SetContentionFactor(*eo.ContentionFactor) + } subtype, data, err := ce.crypt.EncryptExplicit(ctx, bsoncore.Value{Type: val.Type, Data: val.Value}, transformed) if err != nil { @@ -130,6 +159,151 @@ func (ce *ClientEncryption) Close(ctx context.Context) error { return ce.keyVaultClient.Disconnect(ctx) } +// DeleteKey removes the key document with the given UUID (BSON binary subtype 0x04) from the key vault collection. +// Returns the result of the internal deleteOne() operation on the key vault collection. +func (ce *ClientEncryption) DeleteKey(ctx context.Context, id primitive.Binary) (*DeleteResult, error) { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + return ce.keyVaultColl.DeleteOne(ctx, filter) +} + +// GetKeyByAltName returns a key document in the key vault collection with the given keyAltName. +func (ce *ClientEncryption) GetKeyByAltName(ctx context.Context, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build() + return ce.keyVaultColl.FindOne(ctx, filter) +} + +// GetKey finds a single key document with the given UUID (BSON binary subtype 0x04). Returns the result of the +// internal find() operation on the key vault collection. +func (ce *ClientEncryption) GetKey(ctx context.Context, id primitive.Binary) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + return ce.keyVaultColl.FindOne(ctx, filter) +} + +// GetKeys finds all documents in the key vault collection. Returns the result of the internal find() operation on the +// key vault collection. +func (ce *ClientEncryption) GetKeys(ctx context.Context) (*Cursor, error) { + return ce.keyVaultColl.Find(ctx, bson.D{}) +} + +// RemoveKeyAltName removes a keyAltName from the keyAltNames array of the key document in the key vault collection with +// the given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. +func (ce *ClientEncryption) RemoveKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + update := bson.A{bson.D{{"$set", bson.D{{"keyAltNames", bson.D{{"$cond", bson.A{bson.D{{"$eq", + bson.A{"$keyAltNames", bson.A{keyAltName}}}}, "$$REMOVE", bson.D{{"$filter", + bson.D{{"input", "$keyAltNames"}, {"cond", bson.D{{"$ne", bson.A{"$$this", keyAltName}}}}}}}}}}}}}}} + return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update) +} + +// setRewrapManyDataKeyWriteModels will prepare the WriteModel slice for a bulk updating rewrapped documents. +func setRewrapManyDataKeyWriteModels(rewrappedDocuments []bsoncore.Document, writeModels *[]WriteModel) error { + const idKey = "_id" + const keyMaterial = "keyMaterial" + const masterKey = "masterKey" + + if writeModels == nil { + return fmt.Errorf("writeModels pointer not set for location referenced") + } + + // Append a slice of WriteModel with the update document per each rewrappedDoc _id filter. 
+ for _, rewrappedDocument := range rewrappedDocuments { + // Prepare the new master key for update. + masterKeyValue, err := rewrappedDocument.LookupErr(masterKey) + if err != nil { + return err + } + masterKeyDoc := masterKeyValue.Document() + + // Prepare the new material key for update. + keyMaterialValue, err := rewrappedDocument.LookupErr(keyMaterial) + if err != nil { + return err + } + keyMaterialSubtype, keyMaterialData := keyMaterialValue.Binary() + keyMaterialBinary := primitive.Binary{Subtype: keyMaterialSubtype, Data: keyMaterialData} + + // Prepare the _id filter for documents to update. + id, err := rewrappedDocument.LookupErr(idKey) + if err != nil { + return err + } + + idSubtype, idData, ok := id.BinaryOK() + if !ok { + return fmt.Errorf("expected to assert %q as binary, got type %T", idKey, id) + } + binaryID := primitive.Binary{Subtype: idSubtype, Data: idData} + + // Append the mutable document to the slice for bulk update. + *writeModels = append(*writeModels, NewUpdateOneModel(). + SetFilter(bson.D{{idKey, binaryID}}). + SetUpdate( + bson.D{ + {"$set", bson.D{{keyMaterial, keyMaterialBinary}, {masterKey, masterKeyDoc}}}, + {"$currentDate", bson.D{{"updateDate", true}}}, + }, + )) + } + return nil +} + +// RewrapManyDataKey decrypts and encrypts all matching data keys with a possibly new masterKey value. For all +// matching documents, this method will overwrite the "masterKey", "updateDate", and "keyMaterial". On error, some +// matching data keys may have been rewrapped. +// libmongocrypt 1.5.2 is required. An error is returned if the detected version of libmongocrypt is less than 1.5.2. +func (ce *ClientEncryption) RewrapManyDataKey(ctx context.Context, filter interface{}, + opts ...*options.RewrapManyDataKeyOptions) (*RewrapManyDataKeyResult, error) { + + // libmongocrypt versions 1.5.0 and 1.5.1 have a severe bug in RewrapManyDataKey. + // Check if the version string starts with 1.5.0 or 1.5.1. This accounts for pre-release versions, like 1.5.0-rc0. + libmongocryptVersion := mongocrypt.Version() + if strings.HasPrefix(libmongocryptVersion, "1.5.0") || strings.HasPrefix(libmongocryptVersion, "1.5.1") { + return nil, fmt.Errorf("RewrapManyDataKey requires libmongocrypt 1.5.2 or newer. Detected version: %v", libmongocryptVersion) + } + + rmdko := options.MergeRewrapManyDataKeyOptions(opts...) + if ctx == nil { + ctx = context.Background() + } + + // Transfer rmdko options to /x/ package options to publish the mongocrypt feed. + co := mcopts.RewrapManyDataKey() + if rmdko.MasterKey != nil { + keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, rmdko.MasterKey, true, "masterKey") + if err != nil { + return nil, err + } + co.SetMasterKey(keyDoc) + } + if rmdko.Provider != nil { + co.SetProvider(*rmdko.Provider) + } + + // Prepare the filters and rewrap the data key using mongocrypt. + filterdoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, filter, true, "filter") + if err != nil { + return nil, err + } + + rewrappedDocuments, err := ce.crypt.RewrapDataKey(ctx, filterdoc, co) + if err != nil { + return nil, err + } + if len(rewrappedDocuments) == 0 { + // If there are no documents to rewrap, then do nothing. + return new(RewrapManyDataKeyResult), nil + } + + // Prepare the WriteModel slice for bulk updating the rewrapped data keys. 
+ models := []WriteModel{} + if err := setRewrapManyDataKeyWriteModels(rewrappedDocuments, &models); err != nil { + return nil, err + } + + bulkWriteResults, err := ce.keyVaultColl.BulkWrite(ctx, models) + return &RewrapManyDataKeyResult{BulkWriteResult: bulkWriteResults}, err +} + // splitNamespace takes a namespace in the form "database.collection" and returns (database name, collection name) func splitNamespace(ns string) (string, string) { firstDot := strings.Index(ns, ".") diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index a5aaa35..2a69706 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -16,6 +16,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -166,7 +167,7 @@ func (coll *Collection) Database() *Database { return coll.db } -// BulkWrite performs a bulk write operation (https://docs.mongodb.com/manual/core/bulk-write-operations/). +// BulkWrite performs a bulk write operation (https://www.mongodb.com/docs/manual/core/bulk-write-operations/). // // The models parameter must be a slice of operations to be executed in this bulk write. It cannot be nil or empty. // All of the models must be non-nil. See the mongo.WriteModel documentation for a list of valid model types and @@ -218,6 +219,7 @@ func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel, bwo := options.MergeBulkWriteOptions(opts...) op := bulkWrite{ + comment: bwo.Comment, ordered: bwo.Ordered, bypassDocumentValidation: bwo.BypassDocumentValidation, models: models, @@ -225,6 +227,7 @@ func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel, collection: coll, selector: selector, writeConcern: wc, + let: bwo.Let, } err = op.execute(ctx) @@ -280,11 +283,18 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) imo := options.MergeInsertManyOptions(opts...) if imo.BypassDocumentValidation != nil && *imo.BypassDocumentValidation { op = op.BypassDocumentValidation(*imo.BypassDocumentValidation) } + if imo.Comment != nil { + comment, err := transformValue(coll.registry, imo.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } if imo.Ordered != nil { op = op.Ordered(*imo.Ordered) } @@ -323,7 +333,7 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, // // The opts parameter can be used to specify options for the operation (see the options.InsertOneOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/insert/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/. 
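RewrapManyDataKey above is the driver half of data-key rotation: libmongocrypt re-encrypts the matching key documents, and the generated WriteModels persist them in a single bulk write. A sketch under the same assumptions as the previous example; the empty filter and "local" provider are placeholders:

```go
package example

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// rewrapAll rewraps every data key in the vault under the (placeholder)
// "local" KMS provider. On partial failure, some keys may already be rewrapped.
func rewrapAll(ctx context.Context, ce *mongo.ClientEncryption) error {
	result, err := ce.RewrapManyDataKey(ctx, bson.D{}, options.RewrapManyDataKey().
		SetProvider("local"))
	if err != nil {
		return err // also errors up front when libmongocrypt < 1.5.2 is detected
	}
	if result.BulkWriteResult != nil {
		log.Printf("rewrapped %d data keys", result.BulkWriteResult.ModifiedCount)
	}
	return nil
}
```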
func (coll *Collection) InsertOne(ctx context.Context, document interface{}, opts ...*options.InsertOneOptions) (*InsertOneResult, error) { @@ -333,6 +343,9 @@ func (coll *Collection) InsertOne(ctx context.Context, document interface{}, if ioOpts.BypassDocumentValidation != nil && *ioOpts.BypassDocumentValidation { imOpts.SetBypassDocumentValidation(*ioOpts.BypassDocumentValidation) } + if ioOpts.Comment != nil { + imOpts.SetComment(ioOpts.Comment) + } res, err := coll.insert(ctx, []interface{}{document}, imOpts) rr, err := processWriteError(err) @@ -352,7 +365,7 @@ func (coll *Collection) InsertOne(ctx context.Context, document interface{}, // // The opts parameter can be used to specify options for the operation (see the options.InsertManyOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/insert/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/. func (coll *Collection) InsertMany(ctx context.Context, documents []interface{}, opts ...*options.InsertManyOptions) (*InsertManyResult, error) { @@ -450,10 +463,24 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) + if do.Comment != nil { + comment, err := transformValue(coll.registry, do.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } if do.Hint != nil { op = op.Hint(true) } + if do.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, do.Let, true, "let") + if err != nil { + return nil, err + } + op = op.Let(let) + } // deleteMany cannot be retried retryMode := driver.RetryNone @@ -465,7 +492,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn if rr&expectedRr == 0 { return nil, err } - return &DeleteResult{DeletedCount: int64(op.Result().N)}, err + return &DeleteResult{DeletedCount: op.Result().N}, err } // DeleteOne executes a delete command to delete at most one document from the collection. @@ -477,7 +504,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn // // The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/delete/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/. func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error) { @@ -493,7 +520,7 @@ func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, // // The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/delete/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/. 
func (coll *Collection) DeleteMany(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error) { @@ -547,11 +574,26 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Hint(uo.Hint != nil). - ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI) + ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) + if uo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, uo.Let, true, "let") + if err != nil { + return nil, err + } + op = op.Let(let) + } if uo.BypassDocumentValidation != nil && *uo.BypassDocumentValidation { op = op.BypassDocumentValidation(*uo.BypassDocumentValidation) } + if uo.Comment != nil { + comment, err := transformValue(coll.registry, uo.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } retry := driver.RetryNone // retryable writes are only enabled updateOne/replaceOne operations if !multi && coll.client.retryWrites { @@ -567,8 +609,8 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc opRes := op.Result() res := &UpdateResult{ - MatchedCount: int64(opRes.N), - ModifiedCount: int64(opRes.NModified), + MatchedCount: opRes.N, + ModifiedCount: opRes.NModified, UpsertedCount: int64(len(opRes.Upserted)), } if len(opRes.Upserted) > 0 { @@ -586,12 +628,12 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc // the operation will succeed and an UpdateResult with a MatchedCount of 0 will be returned. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be // made to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { if id == nil { @@ -608,12 +650,12 @@ func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update i // matched set and MatchedCount will equal 1. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be // made to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. 
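The delete path above now forwards let and comment to the server, and the update/replace path gains the same pair. A sketch of a delete whose filter reads a let variable; the collection, field names, and values are placeholders:

```go
package example

import (
	"context"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// cleanupLowStock deletes documents whose qty falls below a let-provided
// threshold; the filter references the variable as $$minQty.
func cleanupLowStock(ctx context.Context, coll *mongo.Collection) error {
	filter := bson.D{{Key: "$expr", Value: bson.D{{Key: "$lt", Value: bson.A{"$qty", "$$minQty"}}}}}
	opts := options.Delete().
		SetLet(bson.D{{Key: "minQty", Value: 5}}). // server-evaluated variables
		SetComment("inventory-cleanup")            // surfaces in command logs/profiler
	res, err := coll.DeleteMany(ctx, filter, opts)
	if err != nil {
		return err
	}
	fmt.Printf("deleted %d documents\n", res.DeletedCount)
	return nil
}
```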
func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { @@ -636,12 +678,12 @@ func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, updat // with a MatchedCount of 0 will be returned. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be made +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made // to the selected documents. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { @@ -665,11 +707,11 @@ func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, upda // selected from the matched set and MatchedCount will equal 1. // // The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil -// and cannot contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). // // The opts parameter can be used to specify options for the operation (see the options.ReplaceOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.ReplaceOptions) (*UpdateResult, error) { @@ -693,11 +735,16 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, updateOptions := make([]*options.UpdateOptions, 0, len(opts)) for _, opt := range opts { + if opt == nil { + continue + } uOpts := options.Update() uOpts.BypassDocumentValidation = opt.BypassDocumentValidation uOpts.Collation = opt.Collation uOpts.Upsert = opt.Upsert uOpts.Hint = opt.Hint + uOpts.Let = opt.Let + uOpts.Comment = opt.Comment updateOptions = append(updateOptions, uOpts) } @@ -709,12 +756,12 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, // The pipeline parameter must be an array of documents, each representing an aggregation stage. The pipeline cannot // be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the // mongo.Pipeline type can be used. See -// https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of +// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of // valid stages in aggregations. // // The opts parameter can be used to specify options for the operation (see the options.AggregateOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/aggregate/. 
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/. func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error) { a := aggregateParams{ @@ -735,9 +782,8 @@ func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, return aggregate(a) } -// aggreate is the helper method for Aggregate -func aggregate(a aggregateParams) (*Cursor, error) { - +// aggregate is the helper method for Aggregate +func aggregate(a aggregateParams) (cur *Cursor, err error) { if a.ctx == nil { a.ctx = context.Background() } @@ -748,6 +794,12 @@ func aggregate(a aggregateParams) (*Cursor, error) { } sess := sessionFromContext(a.ctx) + // Always close any created implicit sessions if aggregate returns an error. + defer func() { + if err != nil && sess != nil { + closeImplicitSession(sess) + } + }() if sess == nil && a.client.sessionPool != nil { sess, err = session.NewClientSession(a.client.sessionPool, a.client.id, session.Implicit) if err != nil { @@ -793,7 +845,9 @@ func aggregate(a aggregateParams) (*Cursor, error) { Deployment(a.client.deployment). Crypt(a.client.cryptFLE). ServerAPI(a.client.serverAPI). - HasOutputStage(hasOutputStage) + HasOutputStage(hasOutputStage). + Timeout(a.client.timeout). + MaxTime(ao.MaxTime) if ao.AllowDiskUse != nil { op.AllowDiskUse(*ao.AllowDiskUse) @@ -809,19 +863,21 @@ func aggregate(a aggregateParams) (*Cursor, error) { if ao.Collation != nil { op.Collation(bsoncore.Document(ao.Collation.ToDocument())) } - if ao.MaxTime != nil { - op.MaxTimeMS(int64(*ao.MaxTime / time.Millisecond)) - } if ao.MaxAwaitTime != nil { cursorOpts.MaxTimeMS = int64(*ao.MaxAwaitTime / time.Millisecond) } if ao.Comment != nil { op.Comment(*ao.Comment) + + commentVal, err := transformValue(a.registry, ao.Comment, true, "comment") + if err != nil { + return nil, err + } + cursorOpts.Comment = commentVal } if ao.Hint != nil { hintVal, err := transformValue(a.registry, ao.Hint, false, "hint") if err != nil { - closeImplicitSession(sess) return nil, err } op.Hint(hintVal) @@ -829,11 +885,24 @@ func aggregate(a aggregateParams) (*Cursor, error) { if ao.Let != nil { let, err := transformBsoncoreDocument(a.registry, ao.Let, true, "let") if err != nil { - closeImplicitSession(sess) return nil, err } op.Let(let) } + if ao.Custom != nil { + // Marshal all custom options before passing to the aggregate operation. Return + // any errors from Marshaling. 
+ customOptions := make(map[string]bsoncore.Value) + for optionName, optionValue := range ao.Custom { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(a.registry, optionValue) + if err != nil { + return nil, err + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + customOptions[optionName] = optionValueBSON + } + op.CustomOptions(customOptions) + } retry := driver.RetryNone if a.retryRead && !hasOutputStage { @@ -843,7 +912,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { err = op.Execute(a.ctx) if err != nil { - closeImplicitSession(sess) if wce, ok := err.(driver.WriteCommandError); ok && wce.WriteConcernError != nil { return nil, *convertDriverWriteConcernError(wce.WriteConcernError) } @@ -852,7 +920,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { bc, err := op.Result(cursorOpts) if err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } cursor, err := newCursorWithSession(bc, a.registry, sess) @@ -901,12 +968,13 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold) op := operation.NewAggregate(pipelineArr).Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference). CommandMonitor(coll.client.monitor).ServerSelector(selector).ClusterClock(coll.client.clock).Database(coll.db.name). - Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout).MaxTime(countOpts.MaxTime) if countOpts.Collation != nil { op.Collation(bsoncore.Document(countOpts.Collation.ToDocument())) } - if countOpts.MaxTime != nil { - op.MaxTimeMS(int64(*countOpts.MaxTime / time.Millisecond)) + if countOpts.Comment != nil { + op.Comment(*countOpts.Comment) } if countOpts.Hint != nil { hintVal, err := transformValue(coll.registry, countOpts.Hint, false, "hint") @@ -950,7 +1018,7 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, // The opts parameter can be used to specify options for the operation (see the options.EstimatedDocumentCountOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/count/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/count/. func (coll *Collection) EstimatedDocumentCount(ctx context.Context, opts ...*options.EstimatedDocumentCountOptions) (int64, error) { @@ -979,16 +1047,23 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, rc = nil } + co := options.MergeEstimatedDocumentCountOptions(opts...) + selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold) op := operation.NewCount().Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). - ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout).MaxTime(co.MaxTime) - co := options.MergeEstimatedDocumentCountOptions(opts...) 
- if co.MaxTime != nil { - op = op.MaxTimeMS(int64(*co.MaxTime / time.Millisecond)) + if co.Comment != nil { + comment, err := transformValue(coll.registry, co.Comment, false, "comment") + if err != nil { + return 0, err + } + op = op.Comment(comment) } + retry := driver.RetryNone if coll.client.retryReads { retry = driver.RetryOncePerCommand @@ -996,7 +1071,6 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, op.Retry(retry) err = op.Execute(ctx) - return op.Result().N, replaceErrors(err) } @@ -1009,7 +1083,7 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, // // The opts parameter can be used to specify options for the operation (see the options.DistinctOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/distinct/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/distinct/. func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter interface{}, opts ...*options.DistinctOptions) ([]interface{}, error) { @@ -1049,13 +1123,18 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). - ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout).MaxTime(option.MaxTime) if option.Collation != nil { op.Collation(bsoncore.Document(option.Collation.ToDocument())) } - if option.MaxTime != nil { - op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond)) + if option.Comment != nil { + comment, err := transformValue(coll.registry, option.Comment, true, "comment") + if err != nil { + return nil, err + } + op.Comment(comment) } retry := driver.RetryNone if coll.client.retryReads { @@ -1098,9 +1177,9 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i // // The opts parameter can be used to specify options for the operation (see the options.FindOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. func (coll *Collection) Find(ctx context.Context, filter interface{}, - opts ...*options.FindOptions) (*Cursor, error) { + opts ...*options.FindOptions) (cur *Cursor, err error) { if ctx == nil { ctx = context.Background() @@ -1112,6 +1191,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } sess := sessionFromContext(ctx) + // Always close any created implicit sessions if Find returns an error. + defer func() { + if err != nil && sess != nil { + closeImplicitSession(sess) + } + }() if sess == nil && coll.client.sessionPool != nil { var err error sess, err = session.NewClientSession(coll.client.sessionPool, coll.client.id, session.Implicit) @@ -1122,7 +1207,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, err = coll.client.validSession(sess) if err != nil { - closeImplicitSession(sess) return nil, err } @@ -1131,16 +1215,17 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, rc = nil } + fo := options.MergeFindOptions(opts...) 
+ selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold) op := operation.NewFind(f). Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference). CommandMonitor(coll.client.monitor).ServerSelector(selector). ClusterClock(coll.client.clock).Database(coll.db.name).Collection(coll.name). - Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout).MaxTime(fo.MaxTime) - fo := options.MergeFindOptions(opts...) cursorOpts := coll.client.createBaseCursorOptions() - if fo.AllowDiskUse != nil { op.AllowDiskUse(*fo.AllowDiskUse) } @@ -1156,6 +1241,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } if fo.Comment != nil { op.Comment(*fo.Comment) + + commentVal, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return nil, err + } + cursorOpts.Comment = commentVal } if fo.CursorType != nil { switch *fo.CursorType { @@ -1169,11 +1260,17 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Hint != nil { hint, err := transformValue(coll.registry, fo.Hint, false, "hint") if err != nil { - closeImplicitSession(sess) return nil, err } op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return nil, err + } + op.Let(let) + } if fo.Limit != nil { limit := *fo.Limit if limit < 0 { @@ -1186,7 +1283,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Max != nil { max, err := transformBsoncoreDocument(coll.registry, fo.Max, true, "max") if err != nil { - closeImplicitSession(sess) return nil, err } op.Max(max) @@ -1194,13 +1290,9 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.MaxAwaitTime != nil { cursorOpts.MaxTimeMS = int64(*fo.MaxAwaitTime / time.Millisecond) } - if fo.MaxTime != nil { - op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) - } if fo.Min != nil { min, err := transformBsoncoreDocument(coll.registry, fo.Min, true, "min") if err != nil { - closeImplicitSession(sess) return nil, err } op.Min(min) @@ -1214,7 +1306,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Projection != nil { proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") if err != nil { - closeImplicitSession(sess) return nil, err } op.Projection(proj) @@ -1234,7 +1325,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Sort != nil { sort, err := transformBsoncoreDocument(coll.registry, fo.Sort, false, "sort") if err != nil { - closeImplicitSession(sess) return nil, err } op.Sort(sort) @@ -1246,13 +1336,11 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, op = op.Retry(retry) if err = op.Execute(ctx); err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } bc, err := op.Result(cursorOpts) if err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } return newCursorWithSession(bc, coll.registry, sess) @@ -1266,7 +1354,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, // // The opts parameter can be used to specify options for this operation (see the options.FindOneOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. 
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. func (coll *Collection) FindOne(ctx context.Context, filter interface{}, opts ...*options.FindOneOptions) *SingleResult { @@ -1276,6 +1364,9 @@ func (coll *Collection) FindOne(ctx context.Context, filter interface{}, findOpts := make([]*options.FindOptions, 0, len(opts)) for _, opt := range opts { + if opt == nil { + continue + } findOpts = append(findOpts, &options.FindOptions{ AllowPartialResults: opt.AllowPartialResults, BatchSize: opt.BatchSize, @@ -1369,7 +1460,7 @@ func (coll *Collection) findAndModify(ctx context.Context, op *operation.FindAnd // The opts parameter can be used to specify options for the operation (see the options.FindOneAndDeleteOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{}, opts ...*options.FindOneAndDeleteOptions) *SingleResult { @@ -1378,12 +1469,17 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} return &SingleResult{err: err} } fod := options.MergeFindOneAndDeleteOptions(opts...) - op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI) + op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout). + MaxTime(fod.MaxTime) if fod.Collation != nil { op = op.Collation(bsoncore.Document(fod.Collation.ToDocument())) } - if fod.MaxTime != nil { - op = op.MaxTimeMS(int64(*fod.MaxTime / time.Millisecond)) + if fod.Comment != nil { + comment, err := transformValue(coll.registry, fod.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) } if fod.Projection != nil { proj, err := transformBsoncoreDocument(coll.registry, fod.Projection, true, "projection") @@ -1406,6 +1502,13 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} } op = op.Hint(hint) } + if fod.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fod.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1418,12 +1521,12 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} // ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set. // // The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil -// and cannot contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). // // The opts parameter can be used to specify options for the operation (see the options.FindOneAndReplaceOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/.
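The findAndModify hunks above thread `Comment`, `Let`, and the client-level `Timeout`/`MaxTime` through `FindOneAndDelete` and its siblings. A hedged sketch of what that enables for callers; the option setters mirror the `fod.Comment`/`fod.Let` fields handled above, and all names and values are illustrative:

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// deleteOldest removes the oldest matching document and returns it raw,
// attaching a comment (visible in server logs and profiler output) and a
// "let" variable that the filter references as $$cutoff.
func deleteOldest(ctx context.Context, coll *mongo.Collection) (bson.Raw, error) {
	opts := options.FindOneAndDelete().
		SetComment("cleanup-job").
		SetLet(bson.D{{Key: "cutoff", Value: "2022-01-01"}}).
		SetSort(bson.D{{Key: "createdAt", Value: 1}})

	filter := bson.D{{Key: "$expr", Value: bson.D{{Key: "$lt", Value: bson.A{"$createdAt", "$$cutoff"}}}}}
	raw, err := coll.FindOneAndDelete(ctx, filter, opts).DecodeBytes()
	if err == mongo.ErrNoDocuments {
		return nil, nil // nothing matched the filter
	}
	return raw, err
}
```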
func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.FindOneAndReplaceOptions) *SingleResult { @@ -1441,15 +1544,19 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ fo := options.MergeFindOneAndReplaceOptions(opts...) op := operation.NewFindAndModify(f).Update(bsoncore.Value{Type: bsontype.EmbeddedDocument, Data: r}). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).MaxTime(fo.MaxTime) if fo.BypassDocumentValidation != nil && *fo.BypassDocumentValidation { op = op.BypassDocumentValidation(*fo.BypassDocumentValidation) } if fo.Collation != nil { op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } - if fo.MaxTime != nil { - op = op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) + if fo.Comment != nil { + comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) } if fo.Projection != nil { proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") @@ -1478,6 +1585,13 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ } op = op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1490,13 +1604,13 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ // ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be made +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made // to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.FindOneAndUpdateOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{}, opts ...*options.FindOneAndUpdateOptions) *SingleResult { @@ -1510,7 +1624,8 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } fo := options.MergeFindOneAndUpdateOptions(opts...) - op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI) + op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).
+ MaxTime(fo.MaxTime) u, err := transformUpdateValue(coll.registry, update, true) if err != nil { @@ -1531,8 +1646,12 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} if fo.Collation != nil { op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } - if fo.MaxTime != nil { - op = op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) + if fo.Comment != nil { + comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) } if fo.Projection != nil { proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") @@ -1561,18 +1680,25 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } op = op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } // Watch returns a change stream for all changes on the corresponding collection. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The Collection must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be -// nil but can be empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for +// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for // a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the // mongo.Pipeline{} type can be used. // @@ -1602,6 +1728,69 @@ func (coll *Collection) Indexes() IndexView { // Drop drops the collection on the server. This method ignores "namespace not found" errors so it is safe to drop // a collection that does not exist on the server. func (coll *Collection) Drop(ctx context.Context) error { + // Follow Client-Side Encryption specification to check for encryptedFields. + // Drop does not have an encryptedFields option. See: GODRIVER-2413. + // Check for encryptedFields from the client EncryptedFieldsMap. + // Check for encryptedFields from the server if EncryptedFieldsMap is set. + ef := coll.db.getEncryptedFieldsFromMap(coll.name) + if ef == nil && coll.db.client.encryptedFieldsMap != nil { + var err error + if ef, err = coll.db.getEncryptedFieldsFromServer(ctx, coll.name); err != nil { + return err + } + } + + if ef != nil { + return coll.dropEncryptedCollection(ctx, ef) + } + + return coll.drop(ctx) +} + +// dropEncryptedCollection drops a collection with EncryptedFields. +func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interface{}) error { + efBSON, err := transformBsoncoreDocument(coll.registry, ef, true /* mapAllowed */, "encryptedFields") + if err != nil { + return fmt.Errorf("error transforming document: %v", err) + } + + // Drop the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + // Drop ESCCollection. 
+ escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedStateCollection) + if err != nil { + return err + } + if err := coll.db.Collection(escCollection).drop(ctx); err != nil { + return err + } + + // Drop ECCCollection. + eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCacheCollection) + if err != nil { + return err + } + if err := coll.db.Collection(eccCollection).drop(ctx); err != nil { + return err + } + + // Drop ECOCCollection. + ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCompactionCollection) + if err != nil { + return err + } + if err := coll.db.Collection(ecocCollection).drop(ctx); err != nil { + return err + } + + // Drop the data collection. + if err := coll.drop(ctx); err != nil { + return err + } + return nil +} + +// drop drops a collection without EncryptedFields. +func (coll *Collection) drop(ctx context.Context) error { if ctx == nil { ctx = context.Background() } @@ -1636,7 +1825,7 @@ func (coll *Collection) Drop(ctx context.Context) error { ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) err = op.Execute(ctx) // ignore namespace not found errors diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go index 3ec03ba..99f8695 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go @@ -15,6 +15,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -67,6 +68,47 @@ func newEmptyCursor() *Cursor { return &Cursor{bc: driver.NewEmptyBatchCursor()} } +// NewCursorFromDocuments creates a new Cursor pre-loaded with the provided documents, error and registry. If no registry is provided, +// bson.DefaultRegistry will be used. +// +// The documents parameter must be a slice of documents. The slice may be nil or empty, but all elements must be non-nil. +func NewCursorFromDocuments(documents []interface{}, err error, registry *bsoncodec.Registry) (*Cursor, error) { + if registry == nil { + registry = bson.DefaultRegistry + } + + // Convert documents slice to a sequence-style byte array. + var docsBytes []byte + for _, doc := range documents { + switch t := doc.(type) { + case nil: + return nil, ErrNilDocument + case bsonx.Doc: + doc = t.Copy() + case []byte: + // Slight optimization so we'll just use MarshalBSON and not go through the codec machinery. + doc = bson.Raw(t) + } + var marshalErr error + docsBytes, marshalErr = bson.MarshalAppendWithRegistry(registry, docsBytes, doc) + if marshalErr != nil { + return nil, marshalErr + } + } + + c := &Cursor{ + bc: driver.NewBatchCursorFromDocuments(docsBytes), + registry: registry, + err: err, + } + + // Initialize batch and batchLength here. The underlying batch cursor will be preloaded with the + // provided contents, and thus already has a batch before calls to Next/TryNext.
+ c.batch = c.bc.Batch() + c.batchLength = c.bc.Batch().DocumentCount() + return c, nil +} + // ID returns the ID of this cursor, or 0 if the cursor has been closed or exhausted. func (c *Cursor) ID() int64 { return c.bc.ID() } @@ -83,7 +125,7 @@ func (c *Cursor) Next(ctx context.Context) bool { // TryNext attempts to get the next document for this cursor. It returns true if there were no errors and the next // document is available. This is only recommended for use with tailable cursors as a non-blocking alternative to -// Next. See https://docs.mongodb.com/manual/core/tailable-cursors/ for more information about tailable cursors. +// Next. See https://www.mongodb.com/docs/manual/core/tailable-cursors/ for more information about tailable cursors. // // TryNext returns false if the cursor is exhausted, an error occurs when getting results from the server, the next // document is not yet available, or ctx expires. If ctx expires, the error will be set to ctx.Err(). @@ -204,7 +246,10 @@ func (c *Cursor) All(ctx context.Context, results interface{}) error { var index int var err error - defer c.Close(ctx) + // Defer a call to Close to try to clean up the cursor server-side when all + // documents have not been exhausted. Use context.Background() to ensure Close + // completes even if the context passed to All has errored. + defer c.Close(context.Background()) batch := c.batch // exhaust the current batch before iterating the batch cursor for { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index 2078733..cd93fb8 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -13,12 +13,12 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/mongo/writeconcern" - "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -107,12 +107,12 @@ func (db *Database) Collection(name string, opts ...*options.CollectionOptions) // The pipeline parameter must be a slice of documents, each representing an aggregation stage. The pipeline // cannot be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the // mongo.Pipeline type can be used. See -// https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid +// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid // stages in database-level aggregations. // // The opts parameter can be used to specify options for this operation (see the options.AggregateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/aggregate/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/. 
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/.
func (db *Database) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error) { a := aggregateParams{ @@ -176,7 +176,8 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, return op.Session(sess).CommandMonitor(db.client.monitor). ServerSelector(readSelect).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).ReadConcern(db.readConcern). - Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI), sess, nil + Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI). + Timeout(db.client.timeout), sess, nil } // RunCommand executes the given command against the database. This function does not obey the Database's read @@ -184,11 +185,13 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. -// If the command document contains a session ID or any transaction-specific fields, the behavior is undefined. -// Specifying API versioning options in the command document and declaring an API version on the client is not supported. -// The behavior of RunCommand is undefined in this case. // // The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation). +// +// The behavior of RunCommand is undefined if the command document contains any of the following: +// - A session ID or any transaction-specific fields +// - API versioning options when an API version is already declared on the Client +// - maxTimeMS when Timeout is set on the Client func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *SingleResult { if ctx == nil { ctx = context.Background() @@ -217,9 +220,13 @@ func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. -// If the command document contains a session ID or any transaction-specific fields, the behavior is undefined. // // The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation). +// +// The behavior of RunCommandCursor is undefined if the command document contains any of the following: +// - A session ID or any transaction-specific fields +// - API versioning options when an API version is already declared on the Client +// - maxTimeMS when Timeout is set on the Client func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -302,7 +309,10 @@ func (db *Database) Drop(ctx context.Context) error { // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. 
+// +// BUG(benjirewis): ListCollectionSpecifications prevents listing more than 100 collections per database when running +// against MongoDB version 2.6. func (db *Database) ListCollectionSpecifications(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]*CollectionSpecification, error) { @@ -336,7 +346,10 @@ func (db *Database) ListCollectionSpecifications(ctx context.Context, filter int // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollections prevents listing more than 100 collections per database when running against +// MongoDB version 2.6. func (db *Database) ListCollections(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -372,7 +385,7 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt Session(sess).ReadPreference(db.readPreference).CommandMonitor(db.client.monitor). ServerSelector(selector).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).Crypt(db.client.cryptFLE). - ServerAPI(db.client.serverAPI) + ServerAPI(db.client.serverAPI).Timeout(db.client.timeout) cursorOpts := db.client.createBaseCursorOptions() if lco.NameOnly != nil { @@ -382,6 +395,9 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt cursorOpts.BatchSize = *lco.BatchSize op = op.BatchSize(*lco.BatchSize) } + if lco.AuthorizedCollections != nil { + op = op.AuthorizedCollections(*lco.AuthorizedCollections) + } retry := driver.RetryNone if db.client.retryReads { @@ -414,7 +430,10 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollectionNames prevents listing more than 100 collections per database when running against +// MongoDB version 2.6. func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]string, error) { opts = append(opts, options.ListCollections().SetNameOnly(true)) @@ -427,19 +446,13 @@ func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, names := make([]string, 0) for res.Next(ctx) { - next := &bsonx.Doc{} - err = res.Decode(next) - if err != nil { - return nil, err - } - - elem, err := next.LookupErr("name") + elem, err := res.Current.LookupErr("name") if err != nil { return nil, err } - if elem.Type() != bson.TypeString { - return nil, fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type(), bson.TypeString) + if elem.Type != bson.TypeString { + return nil, fmt.Errorf("incorrect type for 'name'. got %v. 
want %v", elem.Type, bson.TypeString) } elemName := elem.StringValue() @@ -466,13 +479,13 @@ func (db *Database) WriteConcern() *writeconcern.WriteConcern { } // Watch returns a change stream for all changes to the corresponding database. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The Database must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be a slice of documents, each representing a pipeline stage. The pipeline cannot be -// nil but can be empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for +// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for // a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the // mongo.Pipeline{} type can be used. // @@ -500,8 +513,141 @@ func (db *Database) Watch(ctx context.Context, pipeline interface{}, // The opts parameter can be used to specify options for the operation (see the options.CreateCollectionOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/create/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/create/. func (db *Database) CreateCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error { + cco := options.MergeCreateCollectionOptions(opts...) + // Follow Client-Side Encryption specification to check for encryptedFields. + // Check for encryptedFields from create options. + ef := cco.EncryptedFields + // Check for encryptedFields from the client EncryptedFieldsMap. + if ef == nil { + ef = db.getEncryptedFieldsFromMap(name) + } + if ef != nil { + return db.createCollectionWithEncryptedFields(ctx, name, ef, opts...) + } + + return db.createCollection(ctx, name, opts...) +} + +// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by running the "listCollections" command. +// Returns nil and no error if the listCollections command succeeds, but "encryptedFields" is not present. +func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collectionName string) (interface{}, error) { + // Check if collection has an EncryptedFields configured server-side. + collSpecs, err := db.ListCollectionSpecifications(ctx, bson.D{{"name", collectionName}}) + if err != nil { + return nil, err + } + if len(collSpecs) == 0 { + return nil, nil + } + if len(collSpecs) > 1 { + return nil, fmt.Errorf("expected 1 or 0 results from listCollections, got %v", len(collSpecs)) + } + collSpec := collSpecs[0] + rawValue, err := collSpec.Options.LookupErr("encryptedFields") + if err == bsoncore.ErrElementNotFound { + return nil, nil + } else if err != nil { + return nil, err + } + + encryptedFields, ok := rawValue.DocumentOK() + if !ok { + return nil, fmt.Errorf("expected encryptedFields of %v to be document, got %v", collectionName, rawValue.Type) + } + + return encryptedFields, nil +} + +// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. 
+// Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName. +func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} { + // Check the EncryptedFieldsMap + efMap := db.client.encryptedFieldsMap + if efMap == nil { + return nil + } + + namespace := db.name + "." + collectionName + + ef, ok := efMap[namespace] + if ok { + return ef + } + return nil +} + +// createCollectionWithEncryptedFields creates a collection with an EncryptedFields. +func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, name string, ef interface{}, opts ...*options.CreateCollectionOptions) error { + efBSON, err := transformBsoncoreDocument(db.registry, ef, true /* mapAllowed */, "encryptedFields") + if err != nil { + return fmt.Errorf("error transforming document: %v", err) + } + + // Create the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + + stateCollectionOpts := options.CreateCollection(). + SetClusteredIndex(bson.D{{"key", bson.D{{"_id", 1}}}, {"unique", true}}) + // Create ESCCollection. + escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedStateCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, escCollection, stateCollectionOpts); err != nil { + return err + } + + // Create ECCCollection. + eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCacheCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, eccCollection, stateCollectionOpts); err != nil { + return err + } + + // Create ECOCCollection. + ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCompactionCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, ecocCollection, stateCollectionOpts); err != nil { + return err + } + + // Create a data collection with the 'encryptedFields' option. + op, err := db.createCollectionOperation(name, opts...) + if err != nil { + return err + } + + op.EncryptedFields(efBSON) + if err := db.executeCreateOperation(ctx, op); err != nil { + return err + } + + // Create an index on the __safeContent__ field in the collection @collectionName. + if _, err := db.Collection(name).Indexes().CreateOne(ctx, IndexModel{Keys: bson.D{{"__safeContent__", 1}}}); err != nil { + return fmt.Errorf("error creating safeContent index: %v", err) + } + + return nil +} + +// createCollection creates a collection without EncryptedFields. +func (db *Database) createCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error { + op, err := db.createCollectionOperation(name, opts...) + if err != nil { + return err + } + return db.executeCreateOperation(ctx, op) +} + +func (db *Database) createCollectionOperation(name string, opts ...*options.CreateCollectionOptions) (*operation.Create, error) { cco := options.MergeCreateCollectionOptions(opts...) 
op := operation.NewCreate(name).ServerAPI(db.client.serverAPI) @@ -511,19 +657,26 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.Collation != nil { op.Collation(bsoncore.Document(cco.Collation.ToDocument())) } + if cco.ChangeStreamPreAndPostImages != nil { + csppi, err := transformBsoncoreDocument(db.registry, cco.ChangeStreamPreAndPostImages, true, "changeStreamPreAndPostImages") + if err != nil { + return nil, err + } + op.ChangeStreamPreAndPostImages(csppi) + } if cco.DefaultIndexOptions != nil { idx, doc := bsoncore.AppendDocumentStart(nil) if cco.DefaultIndexOptions.StorageEngine != nil { storageEngine, err := transformBsoncoreDocument(db.registry, cco.DefaultIndexOptions.StorageEngine, true, "storageEngine") if err != nil { - return err + return nil, err } doc = bsoncore.AppendDocumentElement(doc, "storageEngine", storageEngine) } doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { - return err + return nil, err } op.IndexOptionDefaults(doc) @@ -537,7 +690,7 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.StorageEngine != nil { storageEngine, err := transformBsoncoreDocument(db.registry, cco.StorageEngine, true, "storageEngine") if err != nil { - return err + return nil, err } op.StorageEngine(storageEngine) } @@ -550,7 +703,7 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.Validator != nil { validator, err := transformBsoncoreDocument(db.registry, cco.Validator, true, "validator") if err != nil { - return err + return nil, err } op.Validator(validator) } @@ -570,22 +723,29 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { - return err + return nil, err } op.TimeSeries(doc) } + if cco.ClusteredIndex != nil { + clusteredIndex, err := transformBsoncoreDocument(db.registry, cco.ClusteredIndex, true, "clusteredIndex") + if err != nil { + return nil, err + } + op.ClusteredIndex(clusteredIndex) + } - return db.executeCreateOperation(ctx, op) + return op, nil } // CreateView executes a create command to explicitly create a view on the server. See -// https://docs.mongodb.com/manual/core/views/ for more information about views. This method requires driver version >= +// https://www.mongodb.com/docs/manual/core/views/ for more information about views. This method requires driver version >= // 1.4.0 and MongoDB version >= 3.4. // // The viewName parameter specifies the name of the view to create. // -// The viewOn parameter specifies the name of the collection or view on which this view will be created +// # The viewOn parameter specifies the name of the collection or view on which this view will be created // // The pipeline parameter specifies an aggregation pipeline that will be executed against the source collection or // view to create this view. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/description.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/description.go index 40b1af1..e750e33 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/description.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/description.go @@ -4,6 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package description contains types and functions for describing the state of MongoDB clusters.
package description // import "go.mongodb.org/mongo-driver/mongo/description" // Unknown is an unknown server or topology kind. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go index 798de60..a20c86a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go @@ -40,6 +40,7 @@ type Server struct { HeartbeatInterval time.Duration HelloOK bool Hosts []string + IsCryptd bool LastError error LastUpdateTime time.Time LastWriteTime time.Time @@ -72,7 +73,7 @@ func NewServer(addr address.Address, response bson.Raw) Server { var ok bool var isReplicaSet, isWritablePrimary, hidden, secondary, arbiterOnly bool var msg string - var version VersionRange + var versionRange VersionRange for _, element := range elements { switch element.Key() { case "arbiters": @@ -101,6 +102,12 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = fmt.Errorf("expected 'electionId' to be a objectID but it's a BSON %s", element.Value().Type) return desc } + case "iscryptd": + desc.IsCryptd, ok = element.Value().BooleanOK() + if !ok { + desc.LastError = fmt.Errorf("expected 'iscryptd' to be a boolean but it's a BSON %s", element.Value().Type) + return desc + } case "helloOk": desc.HelloOK, ok = element.Value().BooleanOK() if !ok { @@ -189,13 +196,13 @@ func NewServer(addr address.Address, response bson.Raw) Server { } desc.CanonicalAddr = address.Address(me).Canonicalize() case "maxWireVersion": - version.Max, ok = element.Value().AsInt32OK() + versionRange.Max, ok = element.Value().AsInt32OK() if !ok { desc.LastError = fmt.Errorf("expected 'maxWireVersion' to be an integer but it's a BSON %s", element.Value().Type) return desc } case "minWireVersion": - version.Min, ok = element.Value().AsInt32OK() + versionRange.Min, ok = element.Value().AsInt32OK() if !ok { desc.LastError = fmt.Errorf("expected 'minWireVersion' to be an integer but it's a BSON %s", element.Value().Type) return desc @@ -286,10 +293,6 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = err return desc } - - if internal.SetMockServiceID { - desc.ServiceID = &desc.TopologyVersion.ProcessID - } } } @@ -325,7 +328,7 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.Kind = Mongos } - desc.WireVersion = &version + desc.WireVersion = &versionRange return desc } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go index 753c45b..df5e77a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go @@ -143,7 +143,7 @@ func readPrefSelector(rp *readpref.ReadPref, isOutputAggregate bool) ServerSelec return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) { if t.Kind == LoadBalanced { // In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check - // this before checking MaxStaleness support becuase there's no monitoring in this mode, so the candidate + // this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate // server wouldn't have a wire version set, which would result in an error. 
return candidates, nil } @@ -296,13 +296,18 @@ func selectByTagSet(candidates []Server, tagSets []tag.Set) []Server { } func selectByKind(candidates []Server, kind ServerKind) []Server { - var result []Server - for _, s := range candidates { + // Record the indices of viable candidates first and then append those to the returned slice + // to avoid appending costly Server structs directly as an optimization. + viableIndexes := make([]int, 0, len(candidates)) + for i, s := range candidates { if s.Kind == kind { - result = append(result, s) + viableIndexes = append(viableIndexes, i) } } - + result := make([]Server, len(viableIndexes)) + for i, idx := range viableIndexes { + result[i] = candidates[idx] + } return result } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go index 669aa14..39bb530 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go @@ -11,67 +11,67 @@ // Basic usage of the driver starts with creating a Client from a connection // string. To do so, call Connect: // -// ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) -// defer cancel() -// client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://foo:bar@localhost:27017")) -// if err != nil { return err } +// ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) +// defer cancel() +// client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://foo:bar@localhost:27017")) +// if err != nil { return err } // // This will create a new client and start monitoring the MongoDB server on localhost. // The Database and Collection types can be used to access the database: // -// collection := client.Database("baz").Collection("qux") +// collection := client.Database("baz").Collection("qux") // // A Collection can be used to query the database or insert documents: // -// res, err := collection.InsertOne(context.Background(), bson.M{"hello": "world"}) -// if err != nil { return err } -// id := res.InsertedID +// res, err := collection.InsertOne(context.Background(), bson.M{"hello": "world"}) +// if err != nil { return err } +// id := res.InsertedID // // Several methods return a cursor, which can be used like this: // -// cur, err := collection.Find(context.Background(), bson.D{}) -// if err != nil { log.Fatal(err) } -// defer cur.Close(context.Background()) -// for cur.Next(context.Background()) { -// // To decode into a struct, use cursor.Decode() -// result := struct{ -// Foo string -// Bar int32 -// }{} -// err := cur.Decode(&result) -// if err != nil { log.Fatal(err) } -// // do something with result... -// -// // To get the raw bson bytes use cursor.Current -// raw := cur.Current -// // do something with raw... -// } -// if err := cur.Err(); err != nil { -// return err -// } +// cur, err := collection.Find(context.Background(), bson.D{}) +// if err != nil { log.Fatal(err) } +// defer cur.Close(context.Background()) +// for cur.Next(context.Background()) { +// // To decode into a struct, use cursor.Decode() +// result := struct{ +// Foo string +// Bar int32 +// }{} +// err := cur.Decode(&result) +// if err != nil { log.Fatal(err) } +// // do something with result... +// +// // To get the raw bson bytes use cursor.Current +// raw := cur.Current +// // do something with raw... 
+// } +// if err := cur.Err(); err != nil { +// return err +// } // // Cursor.All will decode all of the returned elements at once: // -// var results []struct{ -// Foo string -// Bar int32 -// } -// if err = cur.All(context.Background(), &results); err != nil { -// log.Fatal(err) -// } -// // do something with results... +// var results []struct{ +// Foo string +// Bar int32 +// } +// if err = cur.All(context.Background(), &results); err != nil { +// log.Fatal(err) +// } +// // do something with results... // // Methods that only return a single document will return a *SingleResult, which works // like a *sql.Row: // -// result := struct{ -// Foo string -// Bar int32 -// }{} -// filter := bson.D{{"hello", "world"}} -// err := collection.FindOne(context.Background(), filter).Decode(&result) -// if err != nil { return err } -// // do something with result... +// result := struct{ +// Foo string +// Bar int32 +// }{} +// filter := bson.D{{"hello", "world"}} +// err := collection.FindOne(context.Background(), filter).Decode(&result) +// if err != nil { return err } +// // do something with result... // // All Client, Collection, and Database methods that take parameters of type interface{} // will return ErrNilDocument if nil is passed in for an interface{}. @@ -79,7 +79,7 @@ // Additional examples can be found under the examples directory in the driver's repository and // on the MongoDB website. // -// Error Handling +// # Error Handling // // Errors from the MongoDB server will implement the ServerError interface, which has functions to check for specific // error codes, labels, and message substrings. These can be used to check for and handle specific errors. Some methods, @@ -87,26 +87,42 @@ // functions will return true if any of the contained errors satisfy the check. // // There are also helper functions to check for certain specific types of errors: -// IsDuplicateKeyError(error) -// IsNetworkError(error) -// IsTimeout(error) // -// Potential DNS Issues +// IsDuplicateKeyError(error) +// IsNetworkError(error) +// IsTimeout(error) +// +// # Potential DNS Issues // // Building with Go 1.11+ and using connection strings with the "mongodb+srv"[1] scheme is // incompatible with some DNS servers in the wild due to the change introduced in // https://github.com/golang/go/issues/10622. If you receive an error with the message "cannot // unmarshal DNS message" while running an operation, we suggest you use a different DNS server. // -// Client Side Encryption +// # Client Side Encryption // // Client-side encryption is a new feature in MongoDB 4.2 that allows specific data fields to be encrypted. Using this -// feature requires specifying the "cse" build tag during compilation. +// feature requires specifying the "cse" build tag during compilation: +// +// go build -tags cse +// +// Note: Auto encryption is an enterprise- and Atlas-only feature. +// +// The libmongocrypt C library is required when using client-side encryption. Specific versions of libmongocrypt +// are required for different versions of the Go Driver: +// +// - Go Driver v1.2.0 requires libmongocrypt v1.0.0 or higher +// +// - Go Driver v1.5.0 requires libmongocrypt v1.1.0 or higher // -// Note: Auto encryption is an enterprise-only feature. +// - Go Driver v1.8.0 requires libmongocrypt v1.3.0 or higher // -// The libmongocrypt C library is required when using client-side encryption. libmongocrypt version 1.3.0 or higher is -// required when using driver version 1.8.0 or higher. 
To install libmongocrypt, follow the instructions for your +// - Go Driver v1.10.0 requires libmongocrypt v1.5.0 or higher. +// There is a severe bug when calling RewrapManyDataKey with libmongocrypt versions less than 1.5.2. +// This bug may result in data corruption. +// Please use libmongocrypt 1.5.2 or higher when calling RewrapManyDataKey. +// +// To install libmongocrypt, follow the instructions for your // operating system: // // 1. Linux: follow the instructions listed at @@ -117,29 +133,20 @@ // to install packages via brew and compile the libmongocrypt source code. // // 3. Windows: -// mkdir -p c:/libmongocrypt/bin -// mkdir -p c:/libmongocrypt/include -// -// // Run the curl command in an empty directory as it will create new directories when unpacked. -// curl https://s3.amazonaws.com/mciuploads/libmongocrypt/windows/latest_release/libmongocrypt.tar.gz --output libmongocrypt.tar.gz -// tar -xvzf libmongocrypt.tar.gz -// -// cp ./bin/mongocrypt.dll c:/libmongocrypt/bin -// cp ./include/mongocrypt/*.h c:/libmongocrypt/include -// export PATH=$PATH:/cygdrive/c/libmongocrypt/bin -// -// libmongocrypt communicates with the mongocryptd process for automatic encryption. This process can be started manually -// or auto-spawned by the driver itself. To enable auto-spawning, ensure the process binary is on the PATH. To start it -// manually, use AutoEncryptionOptions: -// -// aeo := options.AutoEncryption() -// mongocryptdOpts := map[string]interface{}{ -// "mongocryptdBypassSpawn": true, -// } -// aeo.SetExtraOptions(mongocryptdOpts) -// To specify a process URI for mongocryptd, the "mongocryptdURI" option can be passed in the ExtraOptions map as well. -// See the ClientSideEncryption and ClientSideEncryptionCreateKey examples below for code samples about using this -// feature. -// -// [1] See https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format +// +// mkdir -p c:/libmongocrypt/bin +// mkdir -p c:/libmongocrypt/include +// +// // Run the curl command in an empty directory as it will create new directories when unpacked. +// curl https://s3.amazonaws.com/mciuploads/libmongocrypt/windows/latest_release/libmongocrypt.tar.gz --output libmongocrypt.tar.gz +// tar -xvzf libmongocrypt.tar.gz +// +// cp ./bin/mongocrypt.dll c:/libmongocrypt/bin +// cp ./include/mongocrypt/*.h c:/libmongocrypt/include +// export PATH=$PATH:/cygdrive/c/libmongocrypt/bin +// +// libmongocrypt communicates with the mongocryptd process or mongo_crypt shared library for automatic encryption. +// See AutoEncryptionOpts.SetExtraOptions for options to configure use of mongocryptd or mongo_crypt. +// +// [1] See https://www.mongodb.com/docs/manual/reference/connection-string/#dns-seedlist-connection-format package mongo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go index 2c3ae15..620022e 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go @@ -46,6 +46,11 @@ func (e ErrMapForOrderedArgument) Error() string { } func replaceErrors(err error) error { + // Return nil when err is nil to avoid costly reflection logic below. 
+ if err == nil { + return nil + } + if err == topology.ErrTopologyClosed { return ErrClientDisconnected } @@ -56,6 +61,7 @@ func replaceErrors(err error) error { Labels: de.Labels, Name: de.Name, Wrapped: de.Wrapped, + Raw: bson.Raw(de.Raw), } } if qe, ok := err.(driver.QueryFailureError); ok { @@ -63,6 +69,7 @@ func replaceErrors(err error) error { ce := CommandError{ Name: qe.Message, Wrapped: qe.Wrapped, + Raw: bson.Raw(qe.Response), } dollarErr, err := qe.Response.LookupErr("$err") @@ -102,6 +109,22 @@ func IsTimeout(err error) bool { if err == context.DeadlineExceeded { return true } + if err == driver.ErrDeadlineWouldBeExceeded { + return true + } + if err == topology.ErrServerSelectionTimeout { + return true + } + if _, ok := err.(topology.WaitQueueTimeoutError); ok { + return true + } + if ce, ok := err.(CommandError); ok && ce.IsMaxTimeMSExpiredError() { + return true + } + if we, ok := err.(WriteException); ok && we.WriteConcernError != nil && + we.WriteConcernError.IsMaxTimeMSExpiredError() { + return true + } if ne, ok := err.(net.Error); ok { return ne.Timeout() } @@ -207,6 +230,7 @@ type ServerError interface { } var _ ServerError = CommandError{} +var _ ServerError = WriteError{} var _ ServerError = WriteException{} var _ ServerError = BulkWriteException{} @@ -217,6 +241,7 @@ type CommandError struct { Labels []string // Categories to which the error belongs Name string // A human-readable name corresponding to the error code Wrapped error // The underlying error, if one exists. + Raw bson.Raw // The original server response containing the error. } // Error implements the error interface. @@ -276,6 +301,9 @@ type WriteError struct { Code int Message string Details bson.Raw + + // The original write error from the server response. + Raw bson.Raw } func (we WriteError) Error() string { @@ -286,6 +314,30 @@ func (we WriteError) Error() string { return msg } +// HasErrorCode returns true if the error has the specified code. +func (we WriteError) HasErrorCode(code int) bool { + return we.Code == code +} + +// HasErrorLabel returns true if the error contains the specified label. WriteErrors do not contain labels, +// so we always return false. +func (we WriteError) HasErrorLabel(label string) bool { + return false +} + +// HasErrorMessage returns true if the error contains the specified message. +func (we WriteError) HasErrorMessage(message string) bool { + return strings.Contains(we.Message, message) +} + +// HasErrorCodeWithMessage returns true if the error has the specified code and Message contains the specified message. +func (we WriteError) HasErrorCodeWithMessage(code int, message string) bool { + return we.Code == code && strings.Contains(we.Message, message) +} + +// serverError implements the ServerError interface. +func (we WriteError) serverError() {} + // WriteErrors is a group of write errors that occurred during execution of a write operation. type WriteErrors []WriteError @@ -307,6 +359,7 @@ func writeErrorsFromDriverWriteErrors(errs driver.WriteErrors) WriteErrors { Code: int(err.Code), Message: err.Message, Details: bson.Raw(err.Details), + Raw: bson.Raw(err.Raw), }) } return wes @@ -319,6 +372,7 @@ type WriteConcernError struct { Code int Message string Details bson.Raw + Raw bson.Raw // The original write concern error from the server response. } // Error implements the error interface. @@ -329,6 +383,11 @@ func (wce WriteConcernError) Error() string { return wce.Message } +// IsMaxTimeMSExpiredError returns true if the error is a MaxTimeMSExpired error. 
+func (wce WriteConcernError) IsMaxTimeMSExpiredError() bool { + return wce.Code == 50 +} + // WriteException is the error type returned by the InsertOne, DeleteOne, DeleteMany, UpdateOne, UpdateMany, and // ReplaceOne operations. type WriteException struct { @@ -340,6 +399,9 @@ type WriteException struct { // The categories to which the exception belongs. Labels []string + + // The original server response containing the error. + Raw bson.Raw } // Error implements the error interface. @@ -426,6 +488,7 @@ func convertDriverWriteConcernError(wce *driver.WriteConcernError) *WriteConcern Code: int(wce.Code), Message: wce.Message, Details: bson.Raw(wce.Details), + Raw: bson.Raw(wce.Raw), } } @@ -559,6 +622,7 @@ func processWriteError(err error) (returnResult, error) { WriteConcernError: convertDriverWriteConcernError(tt.WriteConcernError), WriteErrors: writeErrorsFromDriverWriteErrors(tt.WriteErrors), Labels: tt.Labels, + Raw: bson.Raw(tt.Raw), } default: return rrNone, replaceErrors(err) @@ -578,7 +642,8 @@ const batchErrorsTargetLength = 2000 // to the end. // // Example format: -// "[message 1, message 2, +8 more errors...]" +// +// "[message 1, message 2, +8 more errors...]" func joinBatchErrors(errs []error) string { var buf bytes.Buffer fmt.Fprint(&buf, "[") diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go index e8e260f..6bb33f0 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go @@ -12,7 +12,6 @@ import ( "errors" "fmt" "strconv" - "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsontype" @@ -45,7 +44,7 @@ type IndexView struct { // IndexModel represents a new index to be created. type IndexModel struct { // A document describing which keys should be used for the index. It cannot be nil. This must be an order-preserving - // type such as bson.D. Map types such as bson.M are not valid. See https://docs.mongodb.com/manual/indexes/#indexes + // type such as bson.D. Map types such as bson.M are not valid. See https://www.mongodb.com/docs/manual/indexes/#indexes // for examples of valid documents. Keys interface{} @@ -65,7 +64,7 @@ func isNamespaceNotFoundError(err error) bool { // The opts parameter can be used to specify options for this operation (see the options.ListIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listIndexes/. func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -95,7 +94,8 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption Session(sess).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name). - Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) cursorOpts := iv.coll.client.createBaseCursorOptions() lio := options.MergeListIndexesOptions(opts...) 
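Taken together, the errors.go hunks above mean WriteError now satisfies ServerError and IsTimeout recognizes several new cases (server selection timeouts, wait-queue timeouts, MaxTimeMSExpired). A minimal sketch of inspecting errors with these helpers; the collection, filter, and error code are illustrative:

```go
package main

import (
	"context"
	"errors"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// classifyErr sketches error inspection with the helpers added above;
// ctx and coll are assumed to come from the caller.
func classifyErr(ctx context.Context, coll *mongo.Collection) {
	_, err := coll.InsertOne(ctx, bson.D{{Key: "_id", Value: 1}})
	switch {
	case err == nil:
		return
	case mongo.IsDuplicateKeyError(err):
		log.Println("a document with _id=1 already exists")
	case mongo.IsTimeout(err): // now also true for maxTimeMS expiry and wait-queue timeouts
		log.Println("operation timed out")
	default:
		var srvErr mongo.ServerError
		if errors.As(err, &srvErr) && srvErr.HasErrorCode(50) {
			log.Println("server reported MaxTimeMSExpired")
		}
	}
}
```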
@@ -103,9 +103,7 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption op = op.BatchSize(*lio.BatchSize) cursorOpts.BatchSize = *lio.BatchSize } - if lio.MaxTime != nil { - op = op.MaxTimeMS(int64(*lio.MaxTime / time.Millisecond)) - } + op = op.MaxTime(lio.MaxTime) retry := driver.RetryNone if iv.coll.client.retryReads { retry = driver.RetryOncePerCommand @@ -175,7 +173,7 @@ func (iv IndexView) CreateOne(ctx context.Context, model IndexModel, opts ...*op // The opts parameter can be used to specify options for this operation (see the options.CreateIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/createIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/createIndexes/. func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) { names := make([]string, 0, len(models)) @@ -256,11 +254,8 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. op := operation.NewCreateIndexes(indexes). Session(sess).WriteConcern(wc).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor). - Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI) - - if option.MaxTime != nil { - op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond)) - } + Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout).MaxTime(option.MaxTime) if option.CommitQuorum != nil { commitQuorum, err := transformValue(iv.coll.registry, option.CommitQuorum, true, "commitQuorum") if err != nil { @@ -400,10 +395,8 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop Session(sess).WriteConcern(wc).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name). - Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI) - if dio.MaxTime != nil { - op.MaxTimeMS(int64(*dio.MaxTime / time.Millisecond)) - } + Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout).MaxTime(dio.MaxTime) err = op.Execute(ctx) if err != nil { @@ -427,7 +420,7 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop // The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/dropIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/. func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) { if name == "*" { return nil, ErrMultipleIndexDrop @@ -443,7 +436,7 @@ func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.D // The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/dropIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/. 
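The index_view.go hunks above replace the hand-computed MaxTimeMS plumbing with the new operation-level MaxTime and client-level Timeout. A sketch of how these options are typically supplied by callers; the field name and duration are illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// ensureEmailIndex sketches index creation; the collection and field name are assumptions.
func ensureEmailIndex(ctx context.Context, coll *mongo.Collection) {
	name, err := coll.Indexes().CreateOne(ctx, mongo.IndexModel{
		Keys:    bson.D{{Key: "email", Value: 1}}, // must be order-preserving (bson.D), not a map type
		Options: options.Index().SetUnique(true),
	}, options.CreateIndexes().SetMaxTime(2*time.Second)) // ignored when Timeout is set on the Client
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created index", name)
}
```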
func (iv IndexView) DropAll(ctx context.Context, opts ...*options.DropIndexesOptions) (bson.Raw, error) { return iv.drop(ctx, "*", opts...) } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go index 89eec43..2fa5e54 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go @@ -65,11 +65,10 @@ func (me MarshalError) Error() string { // // Example usage: // -// mongo.Pipeline{ -// {{"$group", bson.D{{"_id", "$state"}, {"totalPop", bson.D{{"$sum", "$pop"}}}}}}, -// {{"$match", bson.D{{"totalPop", bson.D{{"$gte", 10*1000*1000}}}}}}, -// } -// +// mongo.Pipeline{ +// {{"$group", bson.D{{"_id", "$state"}, {"totalPop", bson.D{{"$sum", "$pop"}}}}}}, +// {{"$match", bson.D{{"totalPop", bson.D{{"$gte", 10*1000*1000}}}}}}, +// } type Pipeline []bson.D // transformAndEnsureID is a hack that makes it easy to get a RawValue as the _id value. @@ -123,17 +122,6 @@ func transformAndEnsureID(registry *bsoncodec.Registry, val interface{}) (bsonco return doc, id, nil } -func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) { - if doc, ok := val.(bsonx.Doc); ok { - return doc.Copy(), nil - } - b, err := transformBsoncoreDocument(registry, val, true, "document") - if err != nil { - return nil, err - } - return bsonx.ReadDoc(b) -} - func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}, mapAllowed bool, paramName string) (bsoncore.Document, error) { if registry == nil { registry = bson.DefaultRegistry @@ -175,7 +163,7 @@ func ensureDollarKey(doc bsoncore.Document) error { func ensureNoDollarKey(doc bsoncore.Document) error { if elem, err := doc.IndexErr(0); err == nil && strings.HasPrefix(elem.Key(), "$") { - return errors.New("replacement document cannot contains keys beginning with '$") + return errors.New("replacement document cannot contain keys beginning with '$'") } return nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go index c36b1d3..016ccef 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go @@ -28,17 +28,18 @@ const ( var defaultTimeoutArgs = []string{"--idleShutdownTimeoutSecs=60"} var databaseOpts = options.Database().SetReadConcern(readconcern.New()).SetReadPreference(readpref.Primary()) -type mcryptClient struct { +type mongocryptdClient struct { bypassSpawn bool client *Client path string spawnArgs []string } -func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) { +func newMongocryptdClient(cryptSharedLibAvailable bool, opts *options.AutoEncryptionOptions) (*mongocryptdClient, error) { // create mcryptClient instance and spawn process if necessary var bypassSpawn bool var bypassAutoEncryption bool + if bypass, ok := opts.ExtraOptions["mongocryptdBypassSpawn"]; ok { bypassSpawn = bypass.(bool) } @@ -46,10 +47,15 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) bypassAutoEncryption = *opts.BypassAutoEncryption } - mc := &mcryptClient{ - // mongocryptd should not be spawned if mongocryptdBypassSpawn is passed or if bypassAutoEncryption is - // specified because it is not used during decryption - bypassSpawn: bypassSpawn || bypassAutoEncryption, + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis + + mc := &mongocryptdClient{ + // mongocryptd should not be spawned if 
any of these conditions are true: + // - mongocryptdBypassSpawn is passed + // - bypassAutoEncryption is true because mongocryptd is not used during decryption + // - bypassQueryAnalysis is true because mongocryptd is not used during decryption + // - the crypt_shared library is available because it replaces all mongocryptd functionality. + bypassSpawn: bypassSpawn || bypassAutoEncryption || bypassQueryAnalysis || cryptSharedLibAvailable, } if !mc.bypassSpawn { @@ -76,7 +82,7 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) } // markCommand executes the given command on mongocryptd. -func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { +func (mc *mongocryptdClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { // Remove the explicit session from the context if one is set. // The explicit session will be from a different client. // If an explicit session is set, it is applied after automatic encryption. @@ -105,16 +111,16 @@ func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bson } // connect connects the underlying Client instance. This must be called before performing any mark operations. -func (mc *mcryptClient) connect(ctx context.Context) error { +func (mc *mongocryptdClient) connect(ctx context.Context) error { return mc.client.Connect(ctx) } // disconnect disconnects the underlying Client instance. This should be called after all operations have completed. -func (mc *mcryptClient) disconnect(ctx context.Context) error { +func (mc *mongocryptdClient) disconnect(ctx context.Context) error { return mc.client.Disconnect(ctx) } -func (mc *mcryptClient) spawnProcess() error { +func (mc *mongocryptdClient) spawnProcess() error { // Ignore gosec warning about subprocess launched with externally-provided path variable. /* #nosec G204 */ cmd := exec.Command(mc.path, mc.spawnArgs...) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go index e1f710f..38ed249 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go @@ -6,7 +6,11 @@ package options -import "time" +import ( + "time" + + "go.mongodb.org/mongo-driver/bson" +) // AggregateOptions represents options that can be used to configure an Aggregate operation. type AggregateOptions struct { @@ -19,7 +23,7 @@ type AggregateOptions struct { // If true, writes executed as part of the operation will opt out of document-level validation on the server. This // option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is - // false. See https://docs.mongodb.com/manual/core/schema-validation/ for more information about document + // false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document // validation. BypassDocumentValidation *bool @@ -30,6 +34,10 @@ type AggregateOptions struct { // The maximum amount of time that the query can run on the server. The default value is nil, meaning that there // is no time limit for query execution. + // + // NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used + // in its place to control the amount of time that a single operation can run before returning an error. 
MaxTime + // is ignored if Timeout is set on the client. MaxTime *time.Duration // The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query. @@ -37,7 +45,7 @@ type AggregateOptions struct { MaxAwaitTime *time.Duration // A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation. - // The default is the empty string, which means that no comment will be included in the logs. + // The default is nil, which means that no comment will be included in the logs. Comment *string // The index to use for the aggregation. This should either be the index name as a string or the index specification @@ -50,6 +58,11 @@ type AggregateOptions struct { // Values must be constant or closed expressions that do not reference document fields. Parameters can then be // accessed as variables in an aggregate expression context (e.g. "$$var"). Let interface{} + + // Custom options to be added to aggregate expression. Key-value pairs of the BSON map should correlate with desired + // option names and values. Values must be Marshalable. Custom options may conflict with non-custom options, and custom + // options bypass client-side validation. Prefer using non-custom options where possible. + Custom bson.M } // Aggregate creates a new AggregateOptions instance. @@ -82,6 +95,10 @@ func (ao *AggregateOptions) SetCollation(c *Collation) *AggregateOptions { } // SetMaxTime sets the value for the MaxTime field. +// +// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout +// option may be used in its place to control the amount of time that a single operation can +// run before returning an error. MaxTime is ignored if Timeout is set on the client. func (ao *AggregateOptions) SetMaxTime(d time.Duration) *AggregateOptions { ao.MaxTime = &d return ao @@ -111,6 +128,15 @@ func (ao *AggregateOptions) SetLet(let interface{}) *AggregateOptions { return ao } +// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate +// with desired option names and values. Values must be Marshalable. Custom options may conflict +// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom +// options where possible. +func (ao *AggregateOptions) SetCustom(c bson.M) *AggregateOptions { + ao.Custom = c + return ao +} + // MergeAggregateOptions combines the given AggregateOptions instances into a single AggregateOptions in a last-one-wins // fashion. 
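A sketch of the new Custom aggregate option in use; "allowDiskUse" is shown only as an example of a key passed straight through to the aggregate command without client-side validation:

```go
package main

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// activeByState sketches SetCustom; the pipeline and option key are illustrative.
func activeByState(ctx context.Context, coll *mongo.Collection) (*mongo.Cursor, error) {
	opts := options.Aggregate().SetCustom(bson.M{"allowDiskUse": true})
	return coll.Aggregate(ctx, mongo.Pipeline{
		{{Key: "$match", Value: bson.D{{Key: "status", Value: "active"}}}},
		{{Key: "$group", Value: bson.D{{Key: "_id", Value: "$state"}, {Key: "n", Value: bson.D{{Key: "$sum", Value: 1}}}}}},
	}, opts)
}
```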
func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { @@ -146,6 +172,9 @@ func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { if ao.Let != nil { aggOpts.Let = ao.Let } + if ao.Custom != nil { + aggOpts.Custom = ao.Custom + } } return aggOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go index 89c3c05..f42714b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go @@ -8,6 +8,9 @@ package options import ( "crypto/tls" + "net/http" + + "go.mongodb.org/mongo-driver/internal" ) // AutoEncryptionOptions represents options used to configure auto encryption/decryption behavior for a mongo.Client @@ -32,11 +35,16 @@ type AutoEncryptionOptions struct { BypassAutoEncryption *bool ExtraOptions map[string]interface{} TLSConfig map[string]*tls.Config + HTTPClient *http.Client + EncryptedFieldsMap map[string]interface{} + BypassQueryAnalysis *bool } // AutoEncryption creates a new AutoEncryptionOptions configured with default values. func AutoEncryption() *AutoEncryptionOptions { - return &AutoEncryptionOptions{} + return &AutoEncryptionOptions{ + HTTPClient: internal.DefaultHTTPClient, + } } // SetKeyVaultClientOptions specifies options for the client used to communicate with the key vault collection. @@ -90,7 +98,35 @@ func (a *AutoEncryptionOptions) SetBypassAutoEncryption(bypass bool) *AutoEncryp return a } -// SetExtraOptions specifies a map of options to configure the mongocryptd process. +// SetExtraOptions specifies a map of options to configure the mongocryptd process or mongo_crypt shared library. +// +// # Supported Extra Options +// +// "mongocryptdURI" - The mongocryptd URI. Allows setting a custom URI used to communicate with the +// mongocryptd process. The default is "mongodb://localhost:27020", which works with the default +// mongocryptd process spawned by the Client. Must be a string. +// +// "mongocryptdBypassSpawn" - If set to true, the Client will not attempt to spawn a mongocryptd +// process. Must be a bool. +// +// "mongocryptdSpawnPath" - The path used when spawning mongocryptd. +// Defaults to empty string and spawns mongocryptd from system path. Must be a string. +// +// "mongocryptdSpawnArgs" - Command line arguments passed when spawning mongocryptd. +// Defaults to ["--idleShutdownTimeoutSecs=60"]. Must be an array of strings. +// +// "cryptSharedLibRequired" - If set to true, Client creation will return an error if the +// crypt_shared library is not loaded. If unset or set to false, Client creation will not return an +// error if the crypt_shared library is not loaded. The default is unset. Must be a bool. +// +// "cryptSharedLibPath" - The crypt_shared library override path. This must be the path to the +// crypt_shared dynamic library file (for example, a .so, .dll, or .dylib file), not the directory +// that contains it. If the override path is a relative path, it will be resolved relative to the +// working directory of the process. If the override path is a relative path and the first path +// component is the literal string "$ORIGIN", the "$ORIGIN" component will be replaced by the +// absolute path to the directory containing the linked libmongocrypt library. Setting an override +// path disables the default system library search path. 
If an override path is specified but the +// crypt_shared library cannot be loaded, Client creation will return an error. Must be a string. func (a *AutoEncryptionOptions) SetExtraOptions(extraOpts map[string]interface{}) *AutoEncryptionOptions { a.ExtraOptions = extraOpts return a @@ -113,6 +149,22 @@ func (a *AutoEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *Au return a } +// SetEncryptedFieldsMap specifies a map from namespace to local EncryptedFieldsMap document. +// EncryptedFieldsMap is used for Queryable Encryption. +// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. +func (a *AutoEncryptionOptions) SetEncryptedFieldsMap(ef map[string]interface{}) *AutoEncryptionOptions { + a.EncryptedFieldsMap = ef + return a +} + +// SetBypassQueryAnalysis specifies whether or not query analysis should be used for automatic encryption. +// Use this option when using explicit encryption with Queryable Encryption. +// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. +func (a *AutoEncryptionOptions) SetBypassQueryAnalysis(bypass bool) *AutoEncryptionOptions { + a.BypassQueryAnalysis = &bypass + return a +} + // MergeAutoEncryptionOptions combines the argued AutoEncryptionOptions in a last-one wins fashion. func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionOptions { aeo := AutoEncryption() @@ -142,6 +194,15 @@ func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionO if opt.TLSConfig != nil { aeo.TLSConfig = opt.TLSConfig } + if opt.EncryptedFieldsMap != nil { + aeo.EncryptedFieldsMap = opt.EncryptedFieldsMap + } + if opt.BypassQueryAnalysis != nil { + aeo.BypassQueryAnalysis = opt.BypassQueryAnalysis + } + if opt.HTTPClient != nil { + aeo.HTTPClient = opt.HTTPClient + } } return aeo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go index 57f98f8..0c36d0b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go @@ -13,12 +13,22 @@ var DefaultOrdered = true type BulkWriteOptions struct { // If true, writes executed as part of the operation will opt out of document-level validation on the server. This // option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is - // false. See https://docs.mongodb.com/manual/core/schema-validation/ for more information about document + // false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document // validation. BypassDocumentValidation *bool + // A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace + // the operation. The default value is nil, which means that no comment will be included in the logs. + Comment interface{} + // If true, no writes will be executed after one fails. The default value is true. Ordered *bool + + // Specifies parameters for all update and delete commands in the BulkWrite. This option is only valid for MongoDB + // versions >= 5.0. Older servers will report an error for using this option. This must be a document mapping + // parameter names to values. 
Values must be constant or closed expressions that do not reference document fields. + // Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // BulkWrite creates a new *BulkWriteOptions instance. @@ -28,6 +38,12 @@ func BulkWrite() *BulkWriteOptions { } } +// SetComment sets the value for the Comment field. +func (b *BulkWriteOptions) SetComment(comment interface{}) *BulkWriteOptions { + b.Comment = comment + return b +} + // SetOrdered sets the value for the Ordered field. func (b *BulkWriteOptions) SetOrdered(ordered bool) *BulkWriteOptions { b.Ordered = &ordered @@ -40,6 +56,15 @@ func (b *BulkWriteOptions) SetBypassDocumentValidation(bypass bool) *BulkWriteOp return b } +// SetLet sets the value for the Let field. Let specifies parameters for all update and delete commands in the BulkWrite. +// This option is only valid for MongoDB versions >= 5.0. Older servers will report an error for using this option. +// This must be a document mapping parameter names to values. Values must be constant or closed expressions that do not +// reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). +func (b *BulkWriteOptions) SetLet(let interface{}) *BulkWriteOptions { + b.Let = &let + return b +} + // MergeBulkWriteOptions combines the given BulkWriteOptions instances into a single BulkWriteOptions in a last-one-wins // fashion. func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions { @@ -48,12 +73,18 @@ func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions { if opt == nil { continue } + if opt.Comment != nil { + b.Comment = opt.Comment + } if opt.Ordered != nil { b.Ordered = opt.Ordered } if opt.BypassDocumentValidation != nil { b.BypassDocumentValidation = opt.BypassDocumentValidation } + if opt.Let != nil { + b.Let = opt.Let + } } return b diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go index fe19f45..862abcd 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go @@ -9,6 +9,7 @@ package options import ( "time" + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" ) @@ -22,11 +23,18 @@ type ChangeStreamOptions struct { // default value is nil, which means the default collation of the collection will be used. Collation *Collation - // Specifies whether the updated document should be returned in change notifications for update operations along - // with the deltas describing the changes made to the document. The default is options.Default, which means that - // the updated document will not be included in the change notification. + // A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation. + // The default is nil, which means that no comment will be included in the logs. + Comment *string + + // Specifies how the updated document should be returned in change notifications for update operations. The default + // is options.Default, which means that only partial update deltas will be included in the change notification. FullDocument *FullDocument + // Specifies how the pre-update document should be returned in change notifications for update operations. 
The default + // is options.Off, which means that the pre-update document will not be included in the change notification. + FullDocumentBeforeChange *FullDocument + // The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query. MaxAwaitTime *time.Duration @@ -35,6 +43,11 @@ type ChangeStreamOptions struct { // StartAfter must not be set. ResumeAfter interface{} + // ShowExpandedEvents specifies whether the server will return an expanded list of change stream events. Additional + // events include: createIndexes, dropIndexes, modify, create, shardCollection, reshardCollection and + // refineCollectionShardKey. This option is only valid for MongoDB versions >= 6.0. + ShowExpandedEvents *bool + // If specified, the change stream will only return changes that occurred at or after the given timestamp. This // option is only valid for MongoDB versions >= 4.0. If this is specified, ResumeAfter and StartAfter must not be // set. @@ -46,6 +59,16 @@ type ChangeStreamOptions struct { // corresponding to an oplog entry immediately after the specified token will be returned. If this is specified, // ResumeAfter and StartAtOperationTime must not be set. This option is only valid for MongoDB versions >= 4.1.1. StartAfter interface{} + + // Custom options to be added to the initial aggregate for the change stream. Key-value pairs of the BSON map should + // correlate with desired option names and values. Values must be Marshalable. Custom options may conflict with + // non-custom options, and custom options bypass client-side validation. Prefer using non-custom options where possible. + Custom bson.M + + // Custom options to be added to the $changeStream stage in the initial aggregate. Key-value pairs of the BSON map should + // correlate with desired option names and values. Values must be Marshalable. Custom pipeline options bypass client-side + // validation. Prefer using non-custom options where possible. + CustomPipeline bson.M } // ChangeStream creates a new ChangeStreamOptions instance. @@ -67,12 +90,24 @@ func (cso *ChangeStreamOptions) SetCollation(c Collation) *ChangeStreamOptions { return cso } +// SetComment sets the value for the Comment field. +func (cso *ChangeStreamOptions) SetComment(comment string) *ChangeStreamOptions { + cso.Comment = &comment + return cso +} + // SetFullDocument sets the value for the FullDocument field. func (cso *ChangeStreamOptions) SetFullDocument(fd FullDocument) *ChangeStreamOptions { cso.FullDocument = &fd return cso } +// SetFullDocumentBeforeChange sets the value for the FullDocumentBeforeChange field. +func (cso *ChangeStreamOptions) SetFullDocumentBeforeChange(fdbc FullDocument) *ChangeStreamOptions { + cso.FullDocumentBeforeChange = &fdbc + return cso +} + // SetMaxAwaitTime sets the value for the MaxAwaitTime field. func (cso *ChangeStreamOptions) SetMaxAwaitTime(d time.Duration) *ChangeStreamOptions { cso.MaxAwaitTime = &d @@ -85,6 +120,12 @@ func (cso *ChangeStreamOptions) SetResumeAfter(rt interface{}) *ChangeStreamOpti return cso } +// SetShowExpandedEvents sets the value for the ShowExpandedEvents field. +func (cso *ChangeStreamOptions) SetShowExpandedEvents(see bool) *ChangeStreamOptions { + cso.ShowExpandedEvents = &see + return cso +} + // SetStartAtOperationTime sets the value for the StartAtOperationTime field. 
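A sketch of opening a change stream with the options added above; it assumes a MongoDB 6.0+ server, and FullDocumentBeforeChange would additionally require pre/post images to be enabled on the collection:

```go
package main

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// watchWithExpandedEvents sketches the new change stream options; the comment
// string is illustrative.
func watchWithExpandedEvents(ctx context.Context, coll *mongo.Collection) {
	stream, err := coll.Watch(ctx, mongo.Pipeline{}, options.ChangeStream().
		SetFullDocument(options.UpdateLookup).
		SetShowExpandedEvents(true).
		SetComment("audit-stream"))
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close(ctx)
	for stream.Next(ctx) {
		log.Println(stream.Current)
	}
}
```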
func (cso *ChangeStreamOptions) SetStartAtOperationTime(t *primitive.Timestamp) *ChangeStreamOptions { cso.StartAtOperationTime = t @@ -97,6 +138,23 @@ func (cso *ChangeStreamOptions) SetStartAfter(sa interface{}) *ChangeStreamOptio return cso } +// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate +// with desired option names and values. Values must be Marshalable. Custom options may conflict +// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom +// options where possible. +func (cso *ChangeStreamOptions) SetCustom(c bson.M) *ChangeStreamOptions { + cso.Custom = c + return cso +} + +// SetCustomPipeline sets the value for the CustomPipeline field. Key-value pairs of the BSON map +// should correlate with desired option names and values. Values must be Marshalable. Custom pipeline +// options bypass client-side validation. Prefer using non-custom options where possible. +func (cso *ChangeStreamOptions) SetCustomPipeline(cp bson.M) *ChangeStreamOptions { + cso.CustomPipeline = cp + return cso +} + // MergeChangeStreamOptions combines the given ChangeStreamOptions instances into a single ChangeStreamOptions in a // last-one-wins fashion. func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions { @@ -111,21 +169,36 @@ func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions if cso.Collation != nil { csOpts.Collation = cso.Collation } + if cso.Comment != nil { + csOpts.Comment = cso.Comment + } if cso.FullDocument != nil { csOpts.FullDocument = cso.FullDocument } + if cso.FullDocumentBeforeChange != nil { + csOpts.FullDocumentBeforeChange = cso.FullDocumentBeforeChange + } if cso.MaxAwaitTime != nil { csOpts.MaxAwaitTime = cso.MaxAwaitTime } if cso.ResumeAfter != nil { csOpts.ResumeAfter = cso.ResumeAfter } + if cso.ShowExpandedEvents != nil { + csOpts.ShowExpandedEvents = cso.ShowExpandedEvents + } if cso.StartAtOperationTime != nil { csOpts.StartAtOperationTime = cso.StartAtOperationTime } if cso.StartAfter != nil { csOpts.StartAfter = cso.StartAfter } + if cso.Custom != nil { + csOpts.Custom = cso.Custom + } + if cso.CustomPipeline != nil { + csOpts.CustomPipeline = cso.CustomPipeline + } } return csOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go index b8f6e87..81ea42d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go @@ -9,6 +9,9 @@ package options import ( "crypto/tls" "fmt" + "net/http" + + "go.mongodb.org/mongo-driver/internal" ) // ClientEncryptionOptions represents all possible options used to configure a ClientEncryption instance. @@ -16,11 +19,14 @@ type ClientEncryptionOptions struct { KeyVaultNamespace string KmsProviders map[string]map[string]interface{} TLSConfig map[string]*tls.Config + HTTPClient *http.Client } // ClientEncryption creates a new ClientEncryptionOptions instance. func ClientEncryption() *ClientEncryptionOptions { - return &ClientEncryptionOptions{} + return &ClientEncryptionOptions{ + HTTPClient: internal.DefaultHTTPClient, + } } // SetKeyVaultNamespace specifies the namespace of the key vault collection. This is required. @@ -56,12 +62,12 @@ func (c *ClientEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) * // to the KMS provider. 
The input map should contain a mapping from each KMS provider to a document containing the necessary // options, as follows: // -// { -// "kmip": { -// "tlsCertificateKeyFile": "foo.pem", -// "tlsCAFile": "fooCA.pem" -// } -// } +// { +// "kmip": { +// "tlsCertificateKeyFile": "foo.pem", +// "tlsCAFile": "fooCA.pem" +// } +// } // // Currently, the following TLS options are supported: // @@ -132,6 +138,9 @@ func MergeClientEncryptionOptions(opts ...*ClientEncryptionOptions) *ClientEncry if opt.TLSConfig != nil { ceo.TLSConfig = opt.TLSConfig } + if opt.HTTPClient != nil { + ceo.HTTPClient = opt.HTTPClient + } } return ceo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index da5f630..4355b2f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -16,6 +16,7 @@ import ( "fmt" "io/ioutil" "net" + "net/http" "strings" "time" @@ -45,7 +46,7 @@ type ContextDialer interface { // AuthMechanism: the mechanism to use for authentication. Supported values include "SCRAM-SHA-256", "SCRAM-SHA-1", // "MONGODB-CR", "PLAIN", "GSSAPI", "MONGODB-X509", and "MONGODB-AWS". This can also be set through the "authMechanism" // URI option. (e.g. "authMechanism=PLAIN"). For more information, see -// https://docs.mongodb.com/manual/core/authentication-mechanisms/. +// https://www.mongodb.com/docs/manual/core/authentication-mechanisms/. // // AuthMechanismProperties can be used to specify additional configuration options for certain mechanisms. They can also // be set through the "authMechanismProperties" URI option @@ -104,6 +105,7 @@ type ClientOptions struct { DisableOCSPEndpointCheck *bool HeartbeatInterval *time.Duration Hosts []string + HTTPClient *http.Client LoadBalanced *bool LocalThreshold *time.Duration MaxConnIdleTime *time.Duration @@ -121,9 +123,9 @@ type ClientOptions struct { RetryWrites *bool ServerAPIOptions *ServerAPIOptions ServerSelectionTimeout *time.Duration - SocketTimeout *time.Duration SRVMaxHosts *int SRVServiceName *string + Timeout *time.Duration TLSConfig *tls.Config WriteConcern *writeconcern.WriteConcern ZlibLevel *int @@ -151,66 +153,76 @@ type ClientOptions struct { // Deprecated: This option is for internal use only and should not be set. It may be changed or removed in any // release. Deployment driver.Deployment + + // SocketTimeout specifies the timeout to be used for the Client's socket reads and writes. + // + // NOTE(benjirewis): SocketTimeout will be deprecated in a future release. The more general Timeout option + // may be used in its place to control the amount of time that a single operation can run before returning + // an error. Setting SocketTimeout and Timeout on a single client will result in undefined behavior. + SocketTimeout *time.Duration } // Client creates a new ClientOptions instance. func Client() *ClientOptions { - return new(ClientOptions) + return &ClientOptions{ + HTTPClient: internal.DefaultHTTPClient, + } } // Validate validates the client options. This method will return the first error found. func (c *ClientOptions) Validate() error { - c.validateAndSetError() - return c.err -} - -func (c *ClientOptions) validateAndSetError() { if c.err != nil { - return + return c.err } + c.err = c.validate() + return c.err +} +func (c *ClientOptions) validate() error { // Direct connections cannot be made if multiple hosts are specified or an SRV URI is used.
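A sketch of the reworked validation that follows: Validate now returns the first problem it finds, including the new pool-size check added in this hunk (the URI and sizes are illustrative):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/mongo/options"
)

// Demonstrates the new pool-size validation: a MinPoolSize larger than
// MaxPoolSize is now rejected instead of silently accepted.
func main() {
	opts := options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetMinPoolSize(20).
		SetMaxPoolSize(10)
	if err := opts.Validate(); err != nil {
		fmt.Println(err) // minPoolSize must be less than or equal to maxPoolSize, got minPoolSize=20 maxPoolSize=10
	}
}
```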
if c.Direct != nil && *c.Direct { if len(c.Hosts) > 1 { - c.err = errors.New("a direct connection cannot be made if multiple hosts are specified") - return + return errors.New("a direct connection cannot be made if multiple hosts are specified") } if c.cs != nil && c.cs.Scheme == connstring.SchemeMongoDBSRV { - c.err = errors.New("a direct connection cannot be made if an SRV URI is used") - return + return errors.New("a direct connection cannot be made if an SRV URI is used") } } + if c.MaxPoolSize != nil && c.MinPoolSize != nil && *c.MaxPoolSize != 0 && *c.MinPoolSize > *c.MaxPoolSize { + return fmt.Errorf("minPoolSize must be less than or equal to maxPoolSize, got minPoolSize=%d maxPoolSize=%d", *c.MinPoolSize, *c.MaxPoolSize) + } + // verify server API version if ServerAPIOptions are passed in. if c.ServerAPIOptions != nil { - c.err = c.ServerAPIOptions.ServerAPIVersion.Validate() + if err := c.ServerAPIOptions.ServerAPIVersion.Validate(); err != nil { + return err + } } // Validation for load-balanced mode. if c.LoadBalanced != nil && *c.LoadBalanced { if len(c.Hosts) > 1 { - c.err = internal.ErrLoadBalancedWithMultipleHosts - return + return internal.ErrLoadBalancedWithMultipleHosts } if c.ReplicaSet != nil { - c.err = internal.ErrLoadBalancedWithReplicaSet - return + return internal.ErrLoadBalancedWithReplicaSet } - if c.Direct != nil { - c.err = internal.ErrLoadBalancedWithDirectConnection - return + if c.Direct != nil && *c.Direct { + return internal.ErrLoadBalancedWithDirectConnection } } // Validation for srvMaxHosts. if c.SRVMaxHosts != nil && *c.SRVMaxHosts > 0 { if c.ReplicaSet != nil { - c.err = internal.ErrSRVMaxHostsWithReplicaSet + return internal.ErrSRVMaxHostsWithReplicaSet } if c.LoadBalanced != nil && *c.LoadBalanced { - c.err = internal.ErrSRVMaxHostsWithLoadBalanced + return internal.ErrSRVMaxHostsWithLoadBalanced } } + return nil } // GetURI returns the original URI used to configure the ClientOptions instance. If ApplyURI was not called during @@ -231,7 +243,7 @@ func (c *ClientOptions) GetURI() string { // If the URI format is incorrect or there are conflicting options specified in the URI an error will be recorded and // can be retrieved by calling Validate. // -// For more information about the URI format, see https://docs.mongodb.com/manual/reference/connection-string/. See +// For more information about the URI format, see https://www.mongodb.com/docs/manual/reference/connection-string/. See // mongo.Connect documentation for examples of using URIs for different Client configurations. func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { if c.err != nil { @@ -445,6 +457,10 @@ func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { c.DisableOCSPEndpointCheck = &cs.SSLDisableOCSPEndpointCheck } + if cs.TimeoutSet { + c.Timeout = &cs.Timeout + } + return c } @@ -470,12 +486,12 @@ func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions { // // 2. "zlib" - requires server version >= 3.6 // -// 3. "zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver version >= 1.3.0 -// without cgo +// 3. "zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver +// version >= 1.3.0 without cgo. // // If this option is specified, the driver will perform a negotiation with the server to determine a common list of compressors and will use the first one in that list when performing operations.
See -// https://docs.mongodb.com/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more +// https://www.mongodb.com/docs/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more // information about configuring compression on the server and the server-side defaults. // // This can also be set through the "compressors" URI option (e.g. "compressors=zstd,zlib,snappy"). The default is @@ -574,7 +590,7 @@ func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions { // SetMaxPoolSize specifies the maximum number of connections allowed in the driver's connection pool to each server. // Requests to a server will block if this maximum is reached. This can also be set through the "maxPoolSize" URI option -// (e.g. "maxPoolSize=100"). The default is 100. If this is 0, it will be set to math.MaxInt64. +// (e.g. "maxPoolSize=100"). If this is 0, maximum connection pool size is not limited. The default is 100. func (c *ClientOptions) SetMaxPoolSize(u uint64) *ClientOptions { c.MaxPoolSize = &u return c @@ -636,7 +652,7 @@ func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptio // 3. "maxStalenessSeconds" (or "maxStaleness"): Specify a maximum replication lag for reads from secondaries in a // replica set (e.g. "maxStalenessSeconds=10"). // -// The default is readpref.Primary(). See https://docs.mongodb.com/manual/core/read-preference/#read-preference for +// The default is readpref.Primary(). See https://www.mongodb.com/docs/manual/core/read-preference/#read-preference for // more information about read preferences. func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions { c.ReadPreference = rp @@ -702,11 +718,31 @@ func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOption // SetSocketTimeout specifies how long the driver will wait for a socket read or write to return before returning a // network error. This can also be set through the "socketTimeoutMS" URI option (e.g. "socketTimeoutMS=1000"). The // default value is 0, meaning no timeout is used and socket operations can block indefinitely. +// +// NOTE(benjirewis): SocketTimeout will be deprecated in a future release. The more general Timeout option may be used +// in its place to control the amount of time that a single operation can run before returning an error. Setting +// SocketTimeout and Timeout on a single client will result in undefined behavior. func (c *ClientOptions) SetSocketTimeout(d time.Duration) *ClientOptions { c.SocketTimeout = &d return c } +// SetTimeout specifies the amount of time that a single operation run on this Client can execute before returning an error. +// The deadline of any operation run through the Client will be honored above any Timeout set on the Client; Timeout will only +// be honored if there is no deadline on the operation Context. Timeout can also be set through the "timeoutMS" URI option +// (e.g. "timeoutMS=1000"). The default value is nil, meaning operations do not inherit a timeout from the Client. +// +// If any Timeout is set (even 0) on the Client, the values of MaxTime on operation options, TransactionOptions.MaxCommitTime and +// SessionOptions.DefaultMaxCommitTime will be ignored. Setting Timeout and SocketTimeout or WriteConcern.wTimeout will result +// in undefined behavior. +// +// NOTE(benjirewis): SetTimeout represents unstable, provisional API. The behavior of the driver when a Timeout is specified is +// subject to change.
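A sketch of the new client-level Timeout described in the note above (provisional API; the URI and duration are illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// Connects with a 5s client-level Timeout (equivalent to "timeoutMS=5000");
// per the note above, per-operation MaxTime values are then ignored.
func main() {
	client, err := mongo.Connect(context.Background(), options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetTimeout(5*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(context.Background())
}
```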
+func (c *ClientOptions) SetTimeout(d time.Duration) *ClientOptions { + c.Timeout = &d + return c +} + // SetTLSConfig specifies a tls.Config instance to use to configure TLS on all connections created to the cluster. // This can also be set through the following URI options: // @@ -735,6 +771,14 @@ func (c *ClientOptions) SetTLSConfig(cfg *tls.Config) *ClientOptions { return c } +// SetHTTPClient specifies the http.Client to be used for any HTTP requests. +// +// This should only be used to set custom HTTP client configurations. By default, the connection will use an internal.DefaultHTTPClient. +func (c *ClientOptions) SetHTTPClient(client *http.Client) *ClientOptions { + c.HTTPClient = client + return c +} + // SetWriteConcern specifies the write concern to use for write operations. This can also be set through the following // URI options: // @@ -857,6 +901,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if len(opt.Hosts) > 0 { c.Hosts = opt.Hosts } + if opt.HTTPClient != nil { + c.HTTPClient = opt.HTTPClient + } if opt.LoadBalanced != nil { c.LoadBalanced = opt.LoadBalanced } @@ -920,6 +967,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.SRVServiceName != nil { c.SRVServiceName = opt.SRVServiceName } + if opt.Timeout != nil { + c.Timeout = opt.Timeout + } if opt.TLSConfig != nil { c.TLSConfig = opt.TLSConfig } @@ -983,7 +1033,9 @@ func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassw return "", err } - data := append(keyData, '\n') + data := make([]byte, 0, len(keyData)+len(certData)+1) + data = append(data, keyData...) + data = append(data, '\n') data = append(data, certData...) return addClientCertFromBytes(cfg, data, keyPassword) } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go index 5c81114..e8b68a2 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go @@ -15,20 +15,20 @@ import ( // CollectionOptions represents options that can be used to configure a Collection. type CollectionOptions struct { - // The read concern to use for operations executed on the Collection. The default value is nil, which means that - // the read concern of the database used to configure the Collection will be used. + // ReadConcern is the read concern to use for operations executed on the Collection. The default value is nil, which means that + // the read concern of the Database used to configure the Collection will be used. ReadConcern *readconcern.ReadConcern - // The write concern to use for operations executed on the Collection. The default value is nil, which means that - // the write concern of the database used to configure the Collection will be used. + // WriteConcern is the write concern to use for operations executed on the Collection. The default value is nil, which means that + // the write concern of the Database used to configure the Collection will be used. WriteConcern *writeconcern.WriteConcern - // The read preference to use for operations executed on the Collection. The default value is nil, which means that - // the read preference of the database used to configure the Collection will be used. + // ReadPreference is the read preference to use for operations executed on the Collection.
The default value is nil, which means that + // the read preference of the Database used to configure the Collection will be used. ReadPreference *readpref.ReadPref - // The BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value - // is nil, which means that the registry of the database used to configure the Collection will be used. + // Registry is the BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value + // is nil, which means that the registry of the Database used to configure the Collection will be used. Registry *bsoncodec.Registry } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go index 094524c..f772ec4 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go @@ -15,6 +15,13 @@ type CountOptions struct { // default value is nil, which means the default collation of the collection will be used. Collation *Collation + // TODO(GODRIVER-2386): CountOptions executor uses aggregation under the hood, which means this type has to be + // TODO a string for now. This can be replaced with `Comment interface{}` once 2386 is implemented. + + // A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace + // the operation. The default is nil, which means that no comment will be included in the logs. + Comment *string + // The index to use for the aggregation. This should either be the index name as a string or the index specification // as a document. The driver will return an error if the hint parameter is a multi-key map. The default value is nil, // which means that no hint will be sent. @@ -26,6 +33,10 @@ type CountOptions struct { // The maximum amount of time that the query can run on the server. The default value is nil, meaning that there is // no time limit for query execution. + // + // NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout option may be used in + // its place to control the amount of time that a single operation can run before returning an error. MaxTime is + // ignored if Timeout is set on the client. MaxTime *time.Duration // The number of documents to skip before counting. The default value is 0. @@ -43,6 +54,12 @@ func (co *CountOptions) SetCollation(c *Collation) *CountOptions { return co } +// SetComment sets the value for the Comment field. +func (co *CountOptions) SetComment(c string) *CountOptions { + co.Comment = &c + return co +} + // SetHint sets the value for the Hint field. func (co *CountOptions) SetHint(h interface{}) *CountOptions { co.Hint = h @@ -56,6 +73,10 @@ func (co *CountOptions) SetLimit(i int64) *CountOptions { } // SetMaxTime sets the value for the MaxTime field. +// +// NOTE(benjirewis): MaxTime will be deprecated in a future release. The more general Timeout +// option may be used in its place to control the amount of time that a single operation can +// run before returning an error. MaxTime is ignored if Timeout is set on the client. 
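A sketch of the new CountOptions fields in use (the comment string and duration are illustrative, and MaxTime is ignored when Timeout is set on the Client):

```go
package main

import (
	"context"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// countAll sketches the new Comment option on counts; note the Comment is a
// string for now because CountDocuments runs as an aggregation (GODRIVER-2386).
func countAll(ctx context.Context, coll *mongo.Collection) {
	n, err := coll.CountDocuments(ctx, bson.D{}, options.Count().
		SetComment("nightly-report").
		SetMaxTime(500*time.Millisecond))
	if err != nil {
		log.Fatal(err)
	}
	log.Println("documents:", n)
}
```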
func (co *CountOptions) SetMaxTime(d time.Duration) *CountOptions { co.MaxTime = &d return co @@ -77,6 +98,9 @@ func MergeCountOptions(opts ...*CountOptions) *CountOptions { if co.Collation != nil { countOpts.Collation = co.Collation } + if co.Comment != nil { + countOpts.Comment = co.Comment + } if co.Hint != nil { countOpts.Hint = co.Hint } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go index 130c8e7..6fc7d06 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go @@ -67,7 +67,7 @@ func (tso *TimeSeriesOptions) SetGranularity(granularity string) *TimeSeriesOpti // CreateCollectionOptions represents options that can be used to configure a CreateCollection operation. type CreateCollectionOptions struct { - // Specifies if the collection is capped (see https://docs.mongodb.com/manual/core/capped-collections/). If true, + // Specifies if the collection is capped (see https://www.mongodb.com/docs/manual/core/capped-collections/). If true, // the SizeInBytes option must also be specified. The default value is false. Capped *bool @@ -75,6 +75,12 @@ type CreateCollectionOptions struct { // For previous server versions, the driver will return an error if this option is used. The default value is nil. Collation *Collation + // Specifies how change streams opened against the collection can return pre- and post-images of updated + // documents. The value must be a document in the form {