From db88e581adf84877aabcf3237f5ed64d37e265b4 Mon Sep 17 00:00:00 2001
From: John Collier
Date: Mon, 10 Jan 2022 11:59:01 -0500
Subject: [PATCH] Update to oras.land/oras-go v0.4.0 (#96)

Signed-off-by: John Collier
---
 registry-library/go.mod | 5 +-
 registry-library/go.sum | 314 +-
 registry-library/library/library.go | 6 +-
 .../github.com/Microsoft/go-winio/CODEOWNERS | 1 +
 .../github.com/Microsoft/go-winio/fileinfo.go | 36 +-
 .../github.com/Microsoft/go-winio/go.mod | 6 +-
 .../github.com/Microsoft/go-winio/go.sum | 20 +-
 .../github.com/Microsoft/go-winio/hvsock.go | 2 +
 .../github.com/Microsoft/go-winio/pipe.go | 21 +-
 .../Microsoft/go-winio/pkg/guid/guid.go | 2 +
 .../pkg/security/grantvmgroupaccess.go | 161 ++
 .../go-winio/pkg/security/syscall_windows.go | 7 +
 .../go-winio/pkg/security/zsyscall_windows.go | 70 +
 .../github.com/Microsoft/go-winio/syscall.go | 2 +-
 .../github.com/Microsoft/go-winio/vhd/vhd.go | 323 +++
 .../Microsoft/go-winio/vhd/zvhd_windows.go | 106 +
 .../Microsoft/go-winio/zsyscall_windows.go | 577 ++--
 .../Microsoft/hcsshim/.gitattributes | 1 +
 .../github.com/Microsoft/hcsshim/.gitignore | 2 +
 .../Microsoft/hcsshim/.gometalinter.json | 17 -
 .../github.com/Microsoft/hcsshim/CODEOWNERS | 3 +
 .../Microsoft/hcsshim/Protobuild.toml | 22 +-
 .../github.com/Microsoft/hcsshim/README.md | 9 +-
 .../github.com/Microsoft/hcsshim/appveyor.yml | 41 -
 .../hcsshim/computestorage/attach.go | 38 +
 .../hcsshim/computestorage/destroy.go | 26 +
 .../hcsshim/computestorage/detach.go | 26 +
 .../hcsshim/computestorage/export.go | 46 +
 .../hcsshim/computestorage/format.go | 26 +
 .../hcsshim/computestorage/helpers.go | 193 ++
 .../hcsshim/computestorage/import.go | 41 +
 .../hcsshim/computestorage/initialize.go | 38 +
 .../Microsoft/hcsshim/computestorage/mount.go | 27 +
 .../Microsoft/hcsshim/computestorage/setup.go | 74 +
 .../hcsshim/computestorage/storage.go | 50 +
 .../computestorage/zsyscall_windows.go | 319 +++
 .../github.com/Microsoft/hcsshim/errors.go | 38 +-
 .../github.com/Microsoft/hcsshim/go.mod | 47 +-
 .../github.com/Microsoft/hcsshim/go.sum | 812 +++++-
 .../Microsoft/hcsshim/hnsendpoint.go | 6 +
 .../github.com/Microsoft/hcsshim/hnspolicy.go | 3 +
 .../Microsoft/hcsshim/internal/cow/cow.go | 2 +
 .../hcsshim/internal/hcs/callback.go | 1 +
 .../Microsoft/hcsshim/internal/hcs/cgo.go | 7 -
 .../Microsoft/hcsshim/internal/hcs/errors.go | 23 +-
 .../Microsoft/hcsshim/internal/hcs/process.go | 20 +-
 .../Microsoft/hcsshim/internal/hcs/service.go | 49 +
 .../Microsoft/hcsshim/internal/hcs/system.go | 155 +-
 .../Microsoft/hcsshim/internal/hcs/utils.go | 29 +
 .../hcsshim/internal/hcs/waithelper.go | 1 -
 .../hcsshim/internal/hns/hnsendpoint.go | 24 +
 .../hcsshim/internal/hns/hnsnetwork.go | 6 -
 .../hcsshim/internal/hns/hnspolicy.go | 10 +
 .../hcsshim/internal/hns/namespace.go | 7 +-
 .../hcsshim/internal/safefile/safeopen.go | 162 +-
 .../internal/safefile/zsyscall_windows.go | 79 -
 .../hcsshim/internal/schema1/schema1.go | 14 +-
 ...r_credential_guard_add_instance_request.go | 16 +
 ...edential_guard_hv_socket_service_config.go | 15 +
 .../container_credential_guard_instance.go | 16 +
 ...ainer_credential_guard_modify_operation.go | 17 +
 ...iner_credential_guard_operation_request.go | 15 +
 ...redential_guard_remove_instance_request.go | 14 +
 .../container_credential_guard_system_info.go | 14 +
 .../hcsshim/internal/schema2/cpu_group.go | 15 +
 .../internal/schema2/cpu_group_affinity.go | 15 +
 .../internal/schema2/cpu_group_config.go | 18 +
 .../schema2/cpu_group_configurations.go | 15 +
 .../internal/schema2/cpu_group_operations.go | 18 +
 .../internal/schema2/cpu_group_property.go | 15 +
 .../schema2/create_group_operation.go | 17 +
 .../schema2/delete_group_operation.go | 15 +
 .../hcsshim/internal/schema2/device.go | 15 +-
 .../hcsshim/internal/schema2/devices.go | 4 +
 .../schema2/host_processor_modify_request.go | 16 +
 .../internal/schema2/hv_socket_address.go | 17 +
 .../schema2/hv_socket_service_config.go | 6 +
 .../schema2/interrupt_moderation_mode.go | 42 +
 .../hcsshim/internal/schema2/iov_settings.go | 22 +
 .../internal/schema2/logical_processor.go | 18 +
 .../hcsshim/internal/schema2/memory.go | 2 +-
 .../hcsshim/internal/schema2/memory_2.go | 21 +-
 .../internal/schema2/modification_request.go | 15 +
 .../internal/schema2/network_adapter.go | 3 +-
 .../internal/schema2/processor_topology.go | 15 +
 .../hcsshim/internal/schema2/property_type.go | 3 +
 .../internal/schema2/service_properties.go | 18 +
 .../internal/schema2/virtual_pci_device.go | 16 +
 .../internal/schema2/virtual_pci_function.go | 18 +
 .../internal/schema2/vm_processor_limits.go | 22 +
 .../hcsshim/internal/timeout/timeout.go | 4 +
 .../hcsshim/internal/vmcompute/vmcompute.go | 47 +-
 .../internal/vmcompute/zsyscall_windows.go | 48 +
 .../hcsshim/internal/wclayer/activatelayer.go | 23 +-
 .../hcsshim/internal/wclayer/baselayer.go | 29 +-
 .../hcsshim/internal/wclayer/createlayer.go | 26 +-
 .../internal/wclayer/createscratchlayer.go | 32 +-
 .../internal/wclayer/deactivatelayer.go | 23 +-
 .../hcsshim/internal/wclayer/destroylayer.go | 23 +-
 .../internal/wclayer/expandscratchsize.go | 31 +-
 .../hcsshim/internal/wclayer/exportlayer.go | 60 +-
 .../internal/wclayer/getlayermountpath.go | 34 +-
 .../internal/wclayer/getsharedbaseimages.go | 22 +-
 .../hcsshim/internal/wclayer/grantvmaccess.go | 26 +-
 .../hcsshim/internal/wclayer/importlayer.go | 73 +-
 .../hcsshim/internal/wclayer/layerexists.go | 25 +-
 .../hcsshim/internal/wclayer/layerid.go | 13 +-
 .../hcsshim/internal/wclayer/layerutils.go | 5 +-
 .../hcsshim/internal/wclayer/legacy.go | 40 +-
 .../hcsshim/internal/wclayer/nametoguid.go | 31 +-
 .../hcsshim/internal/wclayer/preparelayer.go | 27 +-
 .../hcsshim/internal/wclayer/processimage.go | 32 +-
 .../internal/wclayer/unpreparelayer.go | 23 +-
 .../hcsshim/internal/wclayer/wclayer.go | 3 +
 .../hcsshim/internal/winapi/devices.go | 13 +
 .../hcsshim/internal/winapi/errors.go | 15 +
 .../hcsshim/internal/winapi/filesystem.go | 110 +
 .../Microsoft/hcsshim/internal/winapi/iocp.go | 3 +
 .../hcsshim/internal/winapi/jobobject.go | 215 ++
 .../hcsshim/internal/winapi/logon.go | 30 +
 .../hcsshim/internal/winapi/memory.go | 27 +
 .../Microsoft/hcsshim/internal/winapi/net.go | 3 +
 .../Microsoft/hcsshim/internal/winapi/path.go | 11 +
 .../hcsshim/internal/winapi/process.go | 10 +
 .../hcsshim/internal/winapi/processor.go | 7 +
 .../hcsshim/internal/winapi/system.go | 52 +
 .../hcsshim/internal/winapi/thread.go | 12 +
 .../hcsshim/internal/winapi/utils.go | 75 +
 .../hcsshim/internal/winapi/winapi.go | 5 +
 .../internal/winapi/zsyscall_windows.go | 371 +++
 .../github.com/Microsoft/hcsshim/layer.go | 41 +-
 .../hcsshim/osversion/osversion_windows.go | 15 -
 .../hcsshim/osversion/windowsbuilds.go | 11 +
 .../containerd/cgroups/stats/v1/metrics.pb.go | 2527 +++++++++++------
 .../cgroups/stats/v1/metrics.pb.txt | 78 +
 .../containerd/cgroups/stats/v1/metrics.proto | 22 +
 .../archive/compression/compression.go | 21 +
 .../containerd/containerd/content/adaptor.go | 52 +
 .../containerd/containerd/content/content.go | 2 +-
 .../containerd/content/local/locks.go | 13 +-
 .../containerd/content/local/readerat.go | 28 +
 .../containerd/content/local/store.go | 36 +-
 .../containerd/content/local/store_unix.go | 2 +-
 .../containerd/containerd/images/diffid.go | 81 +
 .../containerd/containerd/images/image.go | 2 +-
 .../containerd/images/mediatypes.go | 53 +-
 .../{sys/env.go => labels/labels.go} | 20 +-
 .../containerd/containerd/log/context.go | 14 +-
 .../{sys => pkg/userns}/userns_linux.go | 2 +-
 .../{sys => pkg/userns}/userns_unsupported.go | 2 +-
 .../containerd/platforms/compare.go | 166 +-
 .../containerd/platforms/cpuinfo.go | 35 +-
 .../containerd/platforms/defaults.go | 7 +-
 .../containerd/platforms/defaults_windows.go | 60 +-
 .../containerd/platforms/platforms.go | 4 +-
 .../containerd/reference/reference.go | 4 +
 .../containerd/remotes/docker/auth/fetch.go | 202 ++
 .../remotes/docker/{auth.go => auth/parse.go} | 41 +-
 .../containerd/remotes/docker/authorizer.go | 285 +-
 .../containerd/remotes/docker/errcode.go | 2 +-
 .../containerd/remotes/docker/fetcher.go | 2 +-
 .../remotes/docker/httpreadseeker.go | 2 +-
 .../containerd/remotes/docker/pusher.go | 92 +-
 .../containerd/remotes/docker/registry.go | 41 +-
 .../containerd/remotes/docker/resolver.go | 54 +-
 .../containerd/remotes/docker/scope.go | 14 +-
 .../containerd/remotes/docker/status.go | 22 +-
 .../containerd/remotes/errors/errors.go | 56 +
 .../containerd/containerd/remotes/handlers.go | 41 +-
 .../containerd/containerd/remotes/resolver.go | 2 +
 .../containerd/containerd/sys/filesys.go | 35 -
 .../containerd/sys/filesys_windows.go | 70 +-
 .../containerd/containerd/sys/oom_linux.go | 83 +
 .../sys/{oom_unix.go => oom_unsupported.go} | 47 +-
 .../containerd/containerd/sys/stat_bsd.go | 2 +-
 .../containerd/containerd/sys/stat_openbsd.go | 45 +
 .../{oom_windows.go => userns_deprecated.go} | 16 +-
 .../containerd/containerd/version/version.go | 2 +-
 .../deislabs/oras/pkg/artifact/consts.go | 7 -
 .../deislabs/oras/pkg/content/consts.go | 28 -
 .../deislabs/oras/pkg/content/errors.go | 17 -
 .../deislabs/oras/pkg/content/interface.go | 9 -
 .../deislabs/oras/pkg/content/readerat.go | 34 -
 .../deislabs/oras/pkg/context/context.go | 9 -
 .../deislabs/oras/pkg/oras/store.go | 119 -
 .../github.com/klauspost/compress/LICENSE | 28 +
 .../klauspost/compress/fse/README.md | 79 +
 .../klauspost/compress/fse/bitreader.go | 122 +
 .../klauspost/compress/fse/bitwriter.go | 168 ++
 .../klauspost/compress/fse/bytereader.go | 47 +
 .../klauspost/compress/fse/compress.go | 684 +++++
 .../klauspost/compress/fse/decompress.go | 374 +++
 .../github.com/klauspost/compress/fse/fse.go | 144 +
 .../klauspost/compress/huff0/.gitignore | 1 +
 .../klauspost/compress/huff0/README.md | 89 +
 .../klauspost/compress/huff0/bitreader.go | 329 +++
 .../klauspost/compress/huff0/bitwriter.go | 210 ++
 .../klauspost/compress/huff0/bytereader.go | 54 +
 .../klauspost/compress/huff0/compress.go | 657 +++++
 .../klauspost/compress/huff0/decompress.go | 1164 ++++++++
 .../klauspost/compress/huff0/huff0.go | 273 ++
 .../klauspost/compress/snappy/.gitignore | 16 +
 .../klauspost/compress/snappy/AUTHORS | 15 +
 .../klauspost/compress/snappy/CONTRIBUTORS | 37 +
 .../klauspost/compress/snappy/LICENSE | 27 +
 .../klauspost/compress/snappy/README | 107 +
 .../klauspost/compress/snappy/decode.go | 237 ++
 .../klauspost/compress/snappy/decode_amd64.go | 14 +
 .../klauspost/compress/snappy/decode_amd64.s | 482 ++++
 .../klauspost/compress/snappy/decode_other.go | 115 +
 .../klauspost/compress/snappy/encode.go | 285 ++
 .../klauspost/compress/snappy/encode_amd64.go | 29 +
 .../klauspost/compress/snappy/encode_amd64.s | 730 +++++
 .../klauspost/compress/snappy/encode_other.go | 238 ++
 .../klauspost/compress/snappy/runbench.cmd | 2 +
 .../klauspost/compress/snappy/snappy.go | 98 +
 .../klauspost/compress/zstd/README.md | 417 +++
 .../klauspost/compress/zstd/bitreader.go | 136 +
 .../klauspost/compress/zstd/bitwriter.go | 169 ++
 .../klauspost/compress/zstd/blockdec.go | 739 +++++
 .../klauspost/compress/zstd/blockenc.go | 871 ++++++
 .../compress/zstd/blocktype_string.go | 85 +
 .../klauspost/compress/zstd/bytebuf.go | 127 +
 .../klauspost/compress/zstd/bytereader.go | 88 +
 .../klauspost/compress/zstd/decodeheader.go | 202 ++
 .../klauspost/compress/zstd/decoder.go | 561 ++++
 .../compress/zstd/decoder_options.go | 84 +
 .../klauspost/compress/zstd/dict.go | 122 +
 .../klauspost/compress/zstd/enc_base.go | 177 ++
 .../klauspost/compress/zstd/enc_best.go | 487 ++++
 .../klauspost/compress/zstd/enc_better.go | 1170 ++++++++
 .../klauspost/compress/zstd/enc_dfast.go | 1121 ++++++++
 .../klauspost/compress/zstd/enc_fast.go | 1018 +++++++
 .../klauspost/compress/zstd/encoder.go | 576 ++++
 .../compress/zstd/encoder_options.go | 312 ++
 .../klauspost/compress/zstd/framedec.go | 494 ++++
 .../klauspost/compress/zstd/frameenc.go | 137 +
 .../klauspost/compress/zstd/fse_decoder.go | 385 +++
 .../klauspost/compress/zstd/fse_encoder.go | 726 +++++
 .../klauspost/compress/zstd/fse_predefined.go | 158 ++
 .../klauspost/compress/zstd/hash.go | 77 +
 .../klauspost/compress/zstd/history.go | 89 +
 .../compress/zstd/internal/xxhash/LICENSE.txt | 22 +
 .../compress/zstd/internal/xxhash/README.md | 58 +
 .../compress/zstd/internal/xxhash/xxhash.go | 238 ++
 .../zstd/internal/xxhash/xxhash_amd64.go | 13 +
 .../zstd/internal/xxhash/xxhash_amd64.s | 215 ++
 .../zstd/internal/xxhash/xxhash_other.go | 76 +
 .../zstd/internal/xxhash/xxhash_safe.go | 11 +
 .../klauspost/compress/zstd/seqdec.go | 492 ++++
 .../klauspost/compress/zstd/seqenc.go | 115 +
 .../klauspost/compress/zstd/snappy.go | 436 +++
 .../klauspost/compress/zstd/zstd.go | 156 +
 .../LICENSE.code => moby/locker/LICENSE} | 3 +-
 .../vendor/github.com/moby/locker/README.md | 65 +
 .../vendor/github.com/moby/locker/go.mod | 3 +
 .../vendor/github.com/moby/locker/locker.go | 112 +
 .../opencontainers/go-digest/.mailmap | 3 +
 .../opencontainers/go-digest/.pullapprove.yml | 38 +-
 .../opencontainers/go-digest/.travis.yml | 3 +-
 .../opencontainers/go-digest/LICENSE | 192 ++
 .../opencontainers/go-digest/MAINTAINERS | 12 +-
 .../opencontainers/go-digest/README.md | 70 +-
 .../opencontainers/go-digest/algorithm.go | 1 +
 .../opencontainers/go-digest/digest.go | 1 +
 .../opencontainers/go-digest/digester.go | 1 +
 .../opencontainers/go-digest/doc.go | 10 +-
 .../opencontainers/go-digest/go.mod | 3 +
 .../opencontainers/go-digest/verifiers.go | 1 +
 .../pelletier/go-toml/.dockerignore | 2 +
 .../github.com/pelletier/go-toml/.gitignore | 3 +
 .../github.com/pelletier/go-toml/.travis.yml | 23 -
 .../pelletier/go-toml/CONTRIBUTING.md | 132 +
 .../github.com/pelletier/go-toml/Dockerfile | 11 +
 .../github.com/pelletier/go-toml/Makefile | 29 +
 .../go-toml/PULL_REQUEST_TEMPLATE.md | 5 +
 .../github.com/pelletier/go-toml/README.md | 44 +-
 .../pelletier/go-toml/azure-pipelines.yml | 230 ++
 .../pelletier/go-toml/benchmark.json | 164 --
 .../github.com/pelletier/go-toml/benchmark.sh | 9 +-
 .../pelletier/go-toml/benchmark.toml | 244 --
 .../pelletier/go-toml/benchmark.yml | 121 -
 .../github.com/pelletier/go-toml/doc.go | 2 +-
 .../pelletier/go-toml/example-crlf.toml | 1 +
 .../github.com/pelletier/go-toml/example.toml | 1 +
 .../github.com/pelletier/go-toml/fuzzit.sh | 26 +
 .../github.com/pelletier/go-toml/go.mod | 5 +
 .../github.com/pelletier/go-toml/go.sum | 19 +
 .../pelletier/go-toml/keysparsing.go | 139 +-
 .../github.com/pelletier/go-toml/lexer.go | 163 +-
 .../github.com/pelletier/go-toml/localtime.go | 281 ++
 .../github.com/pelletier/go-toml/marshal.go | 876 +++++-
 .../go-toml/marshal_OrderPreserve_test.toml | 39 +
 .../pelletier/go-toml/marshal_test.toml | 1 +
 .../github.com/pelletier/go-toml/parser.go | 97 +-
 .../github.com/pelletier/go-toml/test.sh | 88 -
 .../github.com/pelletier/go-toml/token.go | 22 +-
 .../github.com/pelletier/go-toml/toml.go | 256 +-
 .../pelletier/go-toml/tomltree_create.go | 13 +
 .../pelletier/go-toml/tomltree_write.go | 364 ++-
 .../github.com/sirupsen/logrus/.travis.yml | 14 +-
 .../github.com/sirupsen/logrus/CHANGELOG.md | 36 +
 .../github.com/sirupsen/logrus/README.md | 2 +-
 .../github.com/sirupsen/logrus/entry.go | 73 +-
 .../vendor/github.com/sirupsen/logrus/go.sum | 2 -
 .../sirupsen/logrus/json_formatter.go | 5 +-
 .../github.com/sirupsen/logrus/logger.go | 2 +-
 .../sirupsen/logrus/terminal_check_unix.go | 2 +-
 .../sirupsen/logrus/text_formatter.go | 7 +-
 .../google.golang.org/grpc/codes/codes.go | 46 +
 .../grpc/connectivity/connectivity.go | 73 -
 .../google.golang.org/grpc/grpclog/grpclog.go | 126 -
 .../google.golang.org/grpc/grpclog/logger.go | 85 -
 .../grpc/grpclog/loggerv2.go | 195 --
 .../grpc/internal/internal.go | 72 -
 .../grpc/internal/status/status.go | 162 ++
 .../google.golang.org/grpc/status/status.go | 119 +-
 registry-library/vendor/modules.txt | 51 +-
 .../oras => oras.land/oras-go}/LICENSE | 3 +-
 .../oras.land/oras-go/pkg/artifact/consts.go | 22 +
 .../oras.land/oras-go/pkg/content/consts.go | 57 +
 .../oras-go/pkg/content/decompressstore.go | 158 ++
 .../oras.land/oras-go/pkg/content/errors.go | 32 +
 .../oras-go}/pkg/content/file.go | 51 +-
 .../oras.land/oras-go/pkg/content/gunzip.go | 72 +
 .../oras-go/pkg/content/interface.go | 24 +
 .../oras.land/oras-go/pkg/content/iowriter.go | 112 +
 .../oras-go}/pkg/content/memory.go | 15 +
 .../oras-go/pkg/content/multireader.go | 55 +
 .../oras-go/pkg/content/multiwriter.go | 31 +
 .../oras-go}/pkg/content/oci.go | 20 +-
 .../oras.land/oras-go/pkg/content/opts.go | 112 +
 .../oras-go/pkg/content/passthrough.go | 286 ++
 .../oras.land/oras-go/pkg/content/readerat.go | 49 +
 .../oras.land/oras-go/pkg/content/untar.go | 157 +
 .../oras-go}/pkg/content/utils.go | 60 +-
 .../oras.land/oras-go/pkg/context/context.go | 24 +
 .../oras-go}/pkg/context/logger.go | 15 +
 .../oras-go}/pkg/oras/errors.go | 16 +-
 .../oras-go}/pkg/oras/pull.go | 21 +-
 .../oras-go}/pkg/oras/pull_opts.go | 69 +-
 .../oras-go}/pkg/oras/push.go | 28 +-
 .../oras-go}/pkg/oras/push_opts.go | 40 +-
 .../oras.land/oras-go/pkg/oras/store.go | 239 ++
 344 files changed, 32190 insertions(+), 4790 deletions(-)
 create mode 100644 registry-library/vendor/github.com/Microsoft/go-winio/CODEOWNERS
 create mode 100644 registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/.gitattributes
 delete mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/.gometalinter.json
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/CODEOWNERS
 delete mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/appveyor.yml
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/format.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/import.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go
 delete mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go
 delete mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_add_instance_request.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_hv_socket_service_config.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_instance.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_modify_operation.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_operation_request.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_remove_instance_request.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_system_info.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_affinity.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_config.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_configurations.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_operations.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_property.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/create_group_operation.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/delete_group_operation.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/host_processor_modify_request.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_address.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/interrupt_moderation_mode.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/iov_settings.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/logical_processor.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/modification_request.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_topology.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/service_properties.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_device.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_function.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_processor_limits.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
 create mode 100644 registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
 create mode 100644 registry-library/vendor/github.com/containerd/containerd/content/adaptor.go
 create mode 100644 registry-library/vendor/github.com/containerd/containerd/images/diffid.go
 rename registry-library/vendor/github.com/containerd/containerd/{sys/env.go => labels/labels.go} (61%)
 rename registry-library/vendor/github.com/containerd/containerd/{sys => pkg/userns}/userns_linux.go (98%)
 rename registry-library/vendor/github.com/containerd/containerd/{sys => pkg/userns}/userns_unsupported.go (98%)
 create mode 100644 registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go
 rename registry-library/vendor/github.com/containerd/containerd/remotes/docker/{auth.go => auth/parse.go} (81%)
 create mode 100644 registry-library/vendor/github.com/containerd/containerd/remotes/errors/errors.go
 delete mode 100644 registry-library/vendor/github.com/containerd/containerd/sys/filesys.go
 create mode 100644 registry-library/vendor/github.com/containerd/containerd/sys/oom_linux.go
 rename registry-library/vendor/github.com/containerd/containerd/sys/{oom_unix.go => oom_unsupported.go} (50%)
 create mode 100644 registry-library/vendor/github.com/containerd/containerd/sys/stat_openbsd.go
 rename registry-library/vendor/github.com/containerd/containerd/sys/{oom_windows.go => userns_deprecated.go} (68%)
 delete mode 100644 registry-library/vendor/github.com/deislabs/oras/pkg/artifact/consts.go
 delete mode 100644 registry-library/vendor/github.com/deislabs/oras/pkg/content/consts.go
 delete mode 100644 registry-library/vendor/github.com/deislabs/oras/pkg/content/errors.go
 delete mode 100644 registry-library/vendor/github.com/deislabs/oras/pkg/content/interface.go
 delete mode 100644 registry-library/vendor/github.com/deislabs/oras/pkg/content/readerat.go
 delete mode 100644 registry-library/vendor/github.com/deislabs/oras/pkg/context/context.go
 delete mode 100644 registry-library/vendor/github.com/deislabs/oras/pkg/oras/store.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/LICENSE
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/fse/README.md
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/fse/bitreader.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/fse/bitwriter.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/fse/bytereader.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/fse/compress.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/fse/decompress.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/fse/fse.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/huff0/.gitignore
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/huff0/README.md
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/huff0/bitreader.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/huff0/bitwriter.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/huff0/bytereader.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/huff0/compress.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/huff0/decompress.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/huff0/huff0.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/.gitignore
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/AUTHORS
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/LICENSE
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/README
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/decode.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/decode_amd64.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/decode_other.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/encode.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/encode_amd64.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/encode_amd64.s
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/encode_other.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/runbench.cmd
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/snappy/snappy.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/README.md
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/bitreader.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/bitwriter.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/blockdec.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/blockenc.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/bytebuf.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/bytereader.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/decodeheader.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/decoder.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/decoder_options.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/dict.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/enc_base.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/enc_best.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/enc_better.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/enc_fast.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/encoder.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/encoder_options.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/framedec.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/frameenc.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/hash.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/history.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/seqdec.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/seqenc.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/snappy.go
 create mode 100644 registry-library/vendor/github.com/klauspost/compress/zstd/zstd.go
 rename registry-library/vendor/github.com/{opencontainers/go-digest/LICENSE.code => moby/locker/LICENSE} (99%)
 create mode 100644 registry-library/vendor/github.com/moby/locker/README.md
 create mode 100644 registry-library/vendor/github.com/moby/locker/go.mod
 create mode 100644 registry-library/vendor/github.com/moby/locker/locker.go
 create mode 100644 registry-library/vendor/github.com/opencontainers/go-digest/LICENSE
 create mode 100644 registry-library/vendor/github.com/opencontainers/go-digest/go.mod
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/.dockerignore
 delete mode 100644 registry-library/vendor/github.com/pelletier/go-toml/.travis.yml
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/Dockerfile
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/Makefile
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
 delete mode 100644 registry-library/vendor/github.com/pelletier/go-toml/benchmark.json
 delete mode 100644 registry-library/vendor/github.com/pelletier/go-toml/benchmark.toml
 delete mode 100644 registry-library/vendor/github.com/pelletier/go-toml/benchmark.yml
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/fuzzit.sh
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/go.mod
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/go.sum
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/localtime.go
 create mode 100644 registry-library/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
 delete mode 100644 registry-library/vendor/github.com/pelletier/go-toml/test.sh
 delete mode 100644 registry-library/vendor/google.golang.org/grpc/connectivity/connectivity.go
 delete mode 100644 registry-library/vendor/google.golang.org/grpc/grpclog/grpclog.go
 delete mode 100644 registry-library/vendor/google.golang.org/grpc/grpclog/logger.go
 delete mode 100644 registry-library/vendor/google.golang.org/grpc/grpclog/loggerv2.go
 delete mode 100644 registry-library/vendor/google.golang.org/grpc/internal/internal.go
 create mode 100644 registry-library/vendor/google.golang.org/grpc/internal/status/status.go
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/LICENSE (98%)
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/artifact/consts.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/consts.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/decompressstore.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/errors.go
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/content/file.go (86%)
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/gunzip.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/interface.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/iowriter.go
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/content/memory.go (90%)
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/multireader.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/multiwriter.go
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/content/oci.go (86%)
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/opts.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/passthrough.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/readerat.go
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/content/untar.go
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/content/utils.go (68%)
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/context/context.go
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/context/logger.go (66%)
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/oras/errors.go (52%)
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/oras/pull.go (84%)
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/oras/pull_opts.go (52%)
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/oras/push.go (76%)
 rename registry-library/vendor/{github.com/deislabs/oras => oras.land/oras-go}/pkg/oras/push_opts.go (74%)
 create mode 100644 registry-library/vendor/oras.land/oras-go/pkg/oras/store.go

diff --git a/registry-library/go.mod b/registry-library/go.mod
index d1b20f5a..e13aafe8 100644
--- a/registry-library/go.mod
+++ b/registry-library/go.mod
@@ -3,10 +3,11 @@ module github.com/devfile/registry-support/registry-library
 go 1.14
 
 require (
-	github.com/containerd/containerd v1.4.3
-	github.com/deislabs/oras v0.8.1
+	github.com/containerd/containerd v1.5.2
 	github.com/devfile/registry-support/index/generator v0.0.0-20211012185733-0a73f866043f
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/spf13/cobra v1.1.1
 	github.com/spf13/viper v1.7.1
+	oras.land/oras-go v0.4.0
+	rsc.io/letsencrypt v0.0.3 // indirect
 )
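[Editor's note] The go.mod change above is the core of this patch: github.com/deislabs/oras v0.8.1 is replaced by its renamed successor, oras.land/oras-go v0.4.0, with containerd bumped to v1.5.2 to match. For a consumer such as registry-library/library/library.go the migration is largely an import-path swap, since the vendored pkg/content and pkg/oras sources carry over as high-similarity renames (see the rename entries above). Below is a minimal, hedged sketch of a pull using the new import paths; the registry reference and output directory are placeholders, and it assumes the FileStore/Pull API surface carried over from deislabs/oras v0.8.1 (which the 52-86% rename similarities suggest, but this patch does not show directly):

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/containerd/containerd/remotes/docker"
    	// Previously: github.com/deislabs/oras/pkg/content and .../pkg/oras
    	"oras.land/oras-go/pkg/content"
    	"oras.land/oras-go/pkg/oras"
    )

    func main() {
    	ctx := context.Background()

    	// Anonymous resolver against the registry's Docker/OCI distribution API.
    	resolver := docker.NewResolver(docker.ResolverOptions{})

    	// File store that receives the pulled blobs ("./output" is illustrative).
    	store := content.NewFileStore("./output")
    	defer store.Close()

    	// Placeholder artifact reference; any OCI reference works here.
    	ref := "registry.example.com/devfile-catalog/go:latest"
    	desc, _, err := oras.Pull(ctx, resolver, ref, store)
    	if err != nil {
    		fmt.Println("pull failed:", err)
    		return
    	}
    	fmt.Println("pulled", ref, "with manifest digest", desc.Digest)
    }
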
diff --git a/registry-library/go.sum b/registry-library/go.sum
index bd96712b..5241f6b0 100644
--- a/registry-library/go.sum
+++ b/registry-library/go.sum
@@ -27,19 +27,36 @@ github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9mo
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
 github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -54,6 +71,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -76,6 +94,7 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
@@ -87,46 +106,129 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
 github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.2 h1:MG/Bg1pbmMb61j3wHCFWPxESXHieiKr2xG64px/k8zQ=
+github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 h1:kIFnQBO7rQ0XkMe6xEwbybYHBEaWmh/f++laI6Emt7M=
-github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/deislabs/oras v0.8.1 h1:If674KraJVpujYR00rzdi0QAmW4BxzMJPVAZJKuhQ0c=
-github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/devfile/api/v2 v2.0.0-20210910153124-da620cd1a7a1/go.mod h1:kLX/nW93gigOHXK3NLeJL2fSS/sgEe+OHu8bo3aoOi4=
 github.com/devfile/library v1.1.1-0.20210910214722-7c5ff63711ec/go.mod h1:svPWwWb+BP15SXCHl0dyOeE4Sohrjl5a2BaOzc/riLc=
@@ -136,14 +238,19 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v0.0.0-20191216044856-a8371794149d h1:jC8tT/S0OGx2cswpeUTn4gOIea8P08lD3VFQT0cOZ50=
-github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
@@ -154,16 +261,20 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -198,13 +309,19 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
 github.com/go-stack/stack v1.8.0/go.mod
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -223,6 +340,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -241,6 +359,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -258,6 +378,7 @@ 
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= @@ -269,6 +390,7 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -305,9 +427,13 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -328,12 +454,17 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= 
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -356,10 +487,13 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -373,7 +507,14 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod 
h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -382,6 +523,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -394,33 +536,49 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod 
h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/openshift/api v0.0.0-20200930075302-db52bc4ef99f/go.mod h1:Si/I9UGeRR3qzg01YWPmtlr0GeGk2fnuggXJRmjAZ6U= github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= @@ -437,6 +595,7 @@ github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -450,14 +609,18 @@ github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7q github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= @@ -465,20 +628,23 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -493,7 +659,7 @@ github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= @@ -504,14 +670,17 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -522,11 +691,24 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= @@ -545,6 +727,7 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd 
v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -560,18 +743,20 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -606,6 +791,7 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -616,6 +802,7 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -623,6 +810,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -633,8 +821,12 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -648,9 +840,11 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -658,7 +852,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -667,38 +860,58 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -719,6 +932,7 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -794,6 +1008,7 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -802,6 +1017,7 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto 
v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -814,12 +1030,18 @@ google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6D google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -852,6 +1074,8 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -878,17 +1102,36 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api 
v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/apiextensions-apiserver v0.21.3 h1:+B6biyUWpqt41kz5x6peIsljlsuwvNAp/oFax/j2/aY= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -896,21 +1139,30 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod 
h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +oras.land/oras-go v0.4.0 h1:u6+7D+raZDYHwlz/uOwNANiRmyYDSSMW7A9E1xXycUQ= +oras.land/oras-go v0.4.0/go.mod h1:VJcU+VE4rkclUbum5C0O7deEZbBYnsnpbGSACwTjOcg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/letsencrypt v0.0.3 h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM= +rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.9.5/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/registry-library/library/library.go b/registry-library/library/library.go index c06fc17b..faeb597d 100644 --- a/registry-library/library/library.go +++ b/registry-library/library/library.go @@ -30,12 +30,12 @@ import ( "text/tabwriter" "time" - orasctx "github.com/deislabs/oras/pkg/context" + orasctx "oras.land/oras-go/pkg/context" "github.com/containerd/containerd/remotes/docker" - "github.com/deislabs/oras/pkg/content" - "github.com/deislabs/oras/pkg/oras" indexSchema "github.com/devfile/registry-support/index/generator/schema" + "oras.land/oras-go/pkg/content" + "oras.land/oras-go/pkg/oras" ) const ( diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/registry-library/vendor/github.com/Microsoft/go-winio/CODEOWNERS new file mode 100644 index 00000000..ae1b4942 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/go-winio/CODEOWNERS @@ -0,0 +1 @@ + * @microsoft/containerplat diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/fileinfo.go b/registry-library/vendor/github.com/Microsoft/go-winio/fileinfo.go index ada2fbab..3ab6bff6 100644 --- a/registry-library/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/registry-library/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -5,21 +5,14 @@ package winio import ( "os" "runtime" - "syscall" "unsafe" -) - -//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) 
(err error) = GetFileInformationByHandleEx -//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle -const ( - fileBasicInfo = 0 - fileIDInfo = 0x12 + "golang.org/x/sys/windows" ) // FileBasicInfo contains file access time and file attributes information. type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime FileAttributes uint32 pad uint32 // padding } @@ -27,7 +20,7 @@ type FileBasicInfo struct { // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { bi := &FileBasicInfo{} - if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -36,13 +29,32 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } runtime.KeepAlive(f) return nil } +// FileStandardInfo contains extended information for the file. +// FILE_STANDARD_INFO in WinBase.h +// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info +type FileStandardInfo struct { + AllocationSize, EndOfFile int64 + NumberOfLinks uint32 + DeletePending, Directory bool +} + +// GetFileStandardInfo retrieves extended information for the file. +func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { + si := &FileStandardInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return si, nil +} + // FileIDInfo contains the volume serial number and file ID for a file. This pair should be // unique on a system. type FileIDInfo struct { @@ -53,7 +65,7 @@ type FileIDInfo struct { // GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) { fileID := &FileIDInfo{} - if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/go.mod b/registry-library/vendor/github.com/Microsoft/go-winio/go.mod index 50b9d6e2..98a8dea0 100644 --- a/registry-library/vendor/github.com/Microsoft/go-winio/go.mod +++ b/registry-library/vendor/github.com/Microsoft/go-winio/go.mod @@ -3,7 +3,7 @@ module github.com/Microsoft/go-winio go 1.12 require ( - github.com/pkg/errors v0.8.1 - github.com/sirupsen/logrus v1.4.1 - golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.7.0 + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c ) diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/go.sum b/registry-library/vendor/github.com/Microsoft/go-winio/go.sum index 209aa8cf..aa6ad3b5 100644 --- a/registry-library/vendor/github.com/Microsoft/go-winio/go.sum +++ b/registry-library/vendor/github.com/Microsoft/go-winio/go.sum @@ -1,18 +1,14 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 
h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/hvsock.go b/registry-library/vendor/github.com/Microsoft/go-winio/hvsock.go index dbfe790e..b632f8f8 100644 --- a/registry-library/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/registry-library/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -1,3 +1,5 @@ +// +build windows + package winio import ( diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/pipe.go b/registry-library/vendor/github.com/Microsoft/go-winio/pipe.go index d6a46f6a..96700a73 100644 --- a/registry-library/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/registry-library/vendor/github.com/Microsoft/go-winio/pipe.go @@ -182,13 +182,14 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { for { + select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) if err == nil { return h, nil } @@ -197,7 +198,7 @@ func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) { } // Wait 10 msec and try again. This is a rather simplistic // view, as we always try each 10 milliseconds. - time.Sleep(time.Millisecond * 10) + time.Sleep(10 * time.Millisecond) } } } @@ -210,7 +211,7 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { if timeout != nil { absTimeout = time.Now().Add(*timeout) } else { - absTimeout = time.Now().Add(time.Second * 2) + absTimeout = time.Now().Add(2 * time.Second) } ctx, _ := context.WithDeadline(context.Background(), absTimeout) conn, err := DialPipeContext(ctx, path) @@ -223,9 +224,15 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { // DialPipeContext attempts to connect to a named pipe by `path` until `ctx` // cancellation or timeout. func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) +} + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { var err error var h syscall.Handle - h, err = tryDialPipe(ctx, &path) + h, err = tryDialPipe(ctx, &path, access) if err != nil { return nil, err } @@ -422,10 +429,10 @@ type PipeConfig struct { // when the pipe is in message mode. MessageMode bool - // InputBufferSize specifies the size the input buffer, in bytes. + // InputBufferSize specifies the size of the input buffer, in bytes. InputBufferSize int32 - // OutputBufferSize specifies the size the input buffer, in bytes. 
+ // OutputBufferSize specifies the size of the output buffer, in bytes. OutputBufferSize int32 } diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/registry-library/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index 58640657..f497c0e3 100644 --- a/registry-library/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/registry-library/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -1,3 +1,5 @@ +// +build windows + // Package guid provides a GUID type. The backing structure for a GUID is // identical to that used by the golang.org/x/sys/windows GUID type. // There are two main binary encodings used for a GUID, the big-endian encoding, diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go new file mode 100644 index 00000000..fca24159 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -0,0 +1,161 @@ +// +build windows + +package security + +import ( + "os" + "syscall" + "unsafe" + + "github.com/pkg/errors" +) + +type ( + accessMask uint32 + accessMode uint32 + desiredAccess uint32 + inheritMode uint32 + objectType uint32 + shareMode uint32 + securityInformation uint32 + trusteeForm uint32 + trusteeType uint32 + + explicitAccess struct { + accessPermissions accessMask + accessMode accessMode + inheritance inheritMode + trustee trustee + } + + trustee struct { + multipleTrustee *trustee + multipleTrusteeOperation int32 + trusteeForm trusteeForm + trusteeType trusteeType + name uintptr + } +) + +const ( + accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ + + accessModeGrant accessMode = 1 + + desiredAccessReadControl desiredAccess = 0x20000 + desiredAccessWriteDac desiredAccess = 0x40000 + + gvmga = "GrantVmGroupAccess:" + + inheritModeNoInheritance inheritMode = 0x0 + inheritModeSubContainersAndObjectsInherit inheritMode = 0x3 + + objectTypeFileObject objectType = 0x1 + + securityInformationDACL securityInformation = 0x4 + + shareModeRead shareMode = 0x1 + shareModeWrite shareMode = 0x2 + + sidVmGroup = "S-1-5-83-0" + + trusteeFormIsSid trusteeForm = 0 + + trusteeTypeWellKnownGroup trusteeType = 5 +) + +// GrantVmGroupAccess sets the DACL for a specified file or directory to +// include Grant ACE entries for the VM Group SID. This is a Go +// re-implementation of the equivalent function in vmcompute, which is +// not exported in RS5. +func GrantVmGroupAccess(name string) error { + // Stat (to determine if `name` is a directory). + s, err := os.Stat(name) + if err != nil { + return errors.Wrapf(err, "%s os.Stat %s", gvmga, name) + } + + // Get a handle to the file/directory. Must defer Close on success. + fd, err := createFile(name, s.IsDir()) + if err != nil { + return err // Already wrapped + } + defer syscall.CloseHandle(fd) + + // Get the current DACL and Security Descriptor. Must defer LocalFree on success. + ot := objectTypeFileObject + si := securityInformationDACL + sd := uintptr(0) + origDACL := uintptr(0) + if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { + return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name) + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + + // Generate a new DACL which is the current DACL with the required ACEs added. + // Must defer LocalFree on success.
+ newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL) + if err != nil { + return err // Already wrapped + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + + // And finally use SetSecurityInfo to apply the updated DACL. + if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { + return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name) + } + + return nil +} + +// createFile is a helper function to call [Nt]CreateFile to get a handle to +// the file or directory. +func createFile(name string, isDir bool) (syscall.Handle, error) { + namep := syscall.StringToUTF16(name) + da := uint32(desiredAccessReadControl | desiredAccessWriteDac) + sm := uint32(shareModeRead | shareModeWrite) + fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) + if isDir { + fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) + } + fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) + if err != nil { + return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name) + } + return fd, nil +} + +// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. +// The caller is responsible for LocalFree of the returned DACL on success. +func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { + // Generate pointers to the SIDs based on the string SIDs + sid, err := syscall.StringToSid(sidVmGroup) + if err != nil { + return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup) + } + + inheritance := inheritModeNoInheritance + if isDir { + inheritance = inheritModeSubContainersAndObjectsInherit + } + + eaArray := []explicitAccess{ + explicitAccess{ + accessPermissions: accessMaskDesiredPermission, + accessMode: accessModeGrant, + inheritance: inheritance, + trustee: trustee{ + trusteeForm: trusteeFormIsSid, + trusteeType: trusteeTypeWellKnownGroup, + name: uintptr(unsafe.Pointer(sid)), + }, + }, + } + + modifiedDACL := uintptr(0) + if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { + return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name) + } + + return modifiedDACL, nil +} diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go new file mode 100644 index 00000000..c40c2739 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go @@ -0,0 +1,7 @@ +package security + +//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go + +//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) [failretval!=0] = advapi32.GetSecurityInfo +//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) [failretval!=0] = advapi32.SetSecurityInfo +//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) [failretval!=0] = advapi32.SetEntriesInAclW diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go new file mode 100644 index 00000000..4a90cb3c --- 
/dev/null +++ b/registry-library/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go @@ -0,0 +1,70 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package security + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values seen on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") +) + +func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/syscall.go b/registry-library/vendor/github.com/Microsoft/go-winio/syscall.go index 5cb52bc7..5955c99f 100644 --- a/registry-library/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/registry-library/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,3 @@ package winio -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/registry-library/vendor/github.com/Microsoft/go-winio/vhd/vhd.go new file mode 100644 index 00000000..b03b789e --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -0,0 +1,323 @@ +// +build windows + +package vhd + +import ( + "fmt" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/pkg/errors" +
"golang.org/x/sys/windows" +) + +//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go + +//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.CreateVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = virtdisk.DetachVirtualDisk +//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) [failretval != 0] = virtdisk.GetVirtualDiskPhysicalPath + +type ( + CreateVirtualDiskFlag uint32 + VirtualDiskFlag uint32 + AttachVirtualDiskFlag uint32 + DetachVirtualDiskFlag uint32 + VirtualDiskAccessMask uint32 +) + +type VirtualStorageType struct { + DeviceID uint32 + VendorID guid.GUID +} + +type CreateVersion2 struct { + UniqueID guid.GUID + MaximumSize uint64 + BlockSizeInBytes uint32 + SectorSizeInBytes uint32 + PhysicalSectorSizeInByte uint32 + ParentPath *uint16 // string + SourcePath *uint16 // string + OpenFlags uint32 + ParentVirtualStorageType VirtualStorageType + SourceVirtualStorageType VirtualStorageType + ResiliencyGUID guid.GUID +} + +type CreateVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 CreateVersion2 +} + +type OpenVersion2 struct { + GetInfoOnly bool + ReadOnly bool + ResiliencyGUID guid.GUID +} + +type OpenVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 OpenVersion2 +} + +type AttachVersion2 struct { + RestrictedOffset uint64 + RestrictedLength uint64 +} + +type AttachVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 AttachVersion2 +} + +const ( + VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 + + // Access Mask for opening a VHD + VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 + VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 + VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 + VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 + VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 + VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 + VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 + VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 + VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 + VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 + + // Flags for creating a VHD + CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 + CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 + CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 + CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 + CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 + 
CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 + CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 + CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 + CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 + + // Flags for opening a VHD + OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 + OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 + OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 + OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 + OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 + OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 + OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 + OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 + OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080 + OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 + OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 + + // Flags for attaching a VHD + AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 + AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 + AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 + AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 + AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 + AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 + AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 + AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 + AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 + AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 + AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 + + // Flags for detaching a VHD + DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 +) + +// CreateVhdx is a helper function to create a simple vhdx file at the given path using +// default values. +func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { + params := CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, + BlockSizeInBytes: blockSizeInMb * 1024 * 1024, + }, + } + + handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, &params) + if err != nil { + return err + } + + if err := syscall.CloseHandle(handle); err != nil { + return err + } + return nil +} + +// DetachVirtualDisk detaches a virtual hard disk by handle. +func DetachVirtualDisk(handle syscall.Handle) (err error) { + if err := detachVirtualDisk(handle, 0, 0); err != nil { + return errors.Wrap(err, "failed to detach virtual disk") + } + return nil +} + +// DetachVhd detaches a vhd found at `path`. +func DetachVhd(path string) error { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + defer syscall.CloseHandle(handle) + return DetachVirtualDisk(handle) +} + +// AttachVirtualDisk attaches a virtual hard disk for use.
+func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { + // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. + if err := attachVirtualDisk( + handle, + nil, + uint32(attachVirtualDiskFlag), + 0, + parameters, + nil, + ); err != nil { + return errors.Wrap(err, "failed to attach virtual disk") + } + return nil +} + +// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 +// of the ATTACH_VIRTUAL_DISK_PARAMETERS. +func AttachVhd(path string) (err error) { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + + defer syscall.CloseHandle(handle) + params := AttachVirtualDiskParameters{Version: 2} + if err := AttachVirtualDisk( + handle, + AttachVirtualDiskFlagNone, + &params, + ); err != nil { + return errors.Wrap(err, "failed to attach virtual disk") + } + return nil +} + +// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. +func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { + parameters := OpenVirtualDiskParameters{Version: 2} + handle, err := OpenVirtualDiskWithParameters( + vhdPath, + virtualDiskAccessMask, + openVirtualDiskFlags, + &parameters, + ) + if err != nil { + return 0, err + } + return handle, nil +} + +// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. +func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + if err := openVirtualDisk( + &defaultType, + vhdPath, + uint32(virtualDiskAccessMask), + uint32(openVirtualDiskFlags), + parameters, + &handle, + ); err != nil { + return 0, errors.Wrap(err, "failed to open virtual disk") + } + return handle, nil +} + +// CreateVirtualDisk creates a virtual hard disk and returns a handle to the disk. +func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + + if err := createVirtualDisk( + &defaultType, + path, + uint32(virtualDiskAccessMask), + nil, + uint32(createVirtualDiskFlags), + 0, + parameters, + nil, + &handle, + ); err != nil { + return handle, errors.Wrap(err, "failed to create virtual disk") + } + return handle, nil +} + +// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical +// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer +// that represents the particular enumeration of the physical disk on the caller's system.
+func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { + var ( + diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars + diskPhysicalPathBuf [256]uint16 + ) + if err := getVirtualDiskPhysicalPath( + handle, + &diskPathSizeInBytes, + &diskPhysicalPathBuf[0], + ); err != nil { + return "", errors.Wrap(err, "failed to get disk physical path") + } + return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil +} + +// CreateDiffVhd is a helper function to create a differencing virtual disk. +func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { + // Setting `ParentPath` is how to signal to create a differencing disk. + createParams := &CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + ParentPath: windows.StringToUTF16Ptr(baseVhdPath), + BlockSizeInBytes: blockSizeInMB * 1024 * 1024, + OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), + }, + } + + vhdHandle, err := CreateVirtualDisk( + diffVhdPath, + VirtualDiskAccessNone, + CreateVirtualDiskFlagNone, + createParams, + ) + if err != nil { + return fmt.Errorf("failed to create differencing vhd: %s", err) + } + if err := syscall.CloseHandle(vhdHandle); err != nil { + return fmt.Errorf("failed to close differencing vhd handle: %s", err) + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/registry-library/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go new file mode 100644 index 00000000..572f7b42 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -0,0 +1,106 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package vhd + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values seen on Windows. (perhaps when running + // all.bat?)
+ return e +} + +var ( + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") + procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") + procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") +) + +func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) +} + +func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters 
*OpenVirtualDiskParameters, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/registry-library/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/registry-library/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index e26b01fa..176ff75e 100644 --- a/registry-library/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/registry-library/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -19,6 +19,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +27,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -37,514 +38,382 @@ func errnoErr(e syscall.Errno) error { } var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") modntdll = windows.NewLazySystemDLL("ntdll.dll") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procLocalFree = modkernel32.NewProc("LocalFree") procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") - procAdjustTokenPrivileges = 
modadvapi32.NewProc("AdjustTokenPrivileges") procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") procBackupRead = modkernel32.NewProc("BackupRead") procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procbind = modws2_32.NewProc("bind") ) -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 } - return -} - -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) - if newport == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) } return } -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } else { - _p0 = 0 +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) + _p0, err = syscall.UTF16PtrFromString(accountName) if err != nil { return } - return _createNamedPipe(_p0, flags, pipeMode, 
maxInstances, outSize, inSize, defaultTimeout, sa) + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) } -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) + _p0, err = syscall.UTF16PtrFromString(systemName) if err != nil { return } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) } -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return } - return + return 
_lookupPrivilegeName(_p0, luid, buffer, size) } -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) - return +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } return } -func rtlNtStatusToDosError(status ntstatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if 
r1 == 0 { + err = errnoErr(e1) + } return } -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return } -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) + _p0, err = syscall.UTF16PtrFromString(name) if err != nil { return } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) + return _createFile(_p0, 
access, mode, sa, createmode, attrs, templatefile) } -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) } return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) } return } -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) - return -} - -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - -func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return } - return + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) } -func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), 
uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) } return } -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) return } -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) return } -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = 
syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return } -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) return } -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return } -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), 
uintptr(reserved), 0, 0) + status = ntstatus(r0) return } -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } else { - _p1 = 0 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } else { - _p2 = 0 - } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) } return } -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } else { - _p1 = 0 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } else { - _p2 = 0 +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -552,11 +421,7 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socketError { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/.gitattributes b/registry-library/vendor/github.com/Microsoft/hcsshim/.gitattributes new file mode 100644 index 00000000..94f480de --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/.gitignore b/registry-library/vendor/github.com/Microsoft/hcsshim/.gitignore index b883f1fd..aec9bd4b 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/.gitignore +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -1 +1,3 @@ *.exe +.idea +.vscode diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/.gometalinter.json b/registry-library/vendor/github.com/Microsoft/hcsshim/.gometalinter.json deleted file mode 100644 index 00e9a6e2..00000000 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/.gometalinter.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Vendor": true, - "Deadline": "2m", - "Sort": [ - "linter", - "severity", - "path", - "line" - ], - "Skip": [ - "internal\\schema2" - ], - "EnableGC": true, - "Enable": [ - "gofmt" - ] -} \ No newline at 
end of file diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/CODEOWNERS b/registry-library/vendor/github.com/Microsoft/hcsshim/CODEOWNERS new file mode 100644 index 00000000..87f49df3 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/CODEOWNERS @@ -0,0 +1,3 @@ +* @microsoft/containerplat + +/hcn/* @nagiesek \ No newline at end of file diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/registry-library/vendor/github.com/Microsoft/hcsshim/Protobuild.toml index 47d7650f..8bb92085 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/Protobuild.toml +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/Protobuild.toml @@ -35,20 +35,10 @@ plugins = ["grpc", "fieldpath"] prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"] plugins = ["ttrpc"] -# Lock down runhcs config - -[[descriptors]] -prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" -target = "cmd/containerd-shim-runhcs-v1/options/next.pb.txt" -ignore_files = [ - "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" -] +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"] +plugins = ["ttrpc"] -[[descriptors]] -prefix = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats" -target = "cmd/containerd-shim-runhcs-v1/stats/next.pb.txt" -ignore_files = [ - "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" -] \ No newline at end of file +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"] +plugins = ["ttrpc"] \ No newline at end of file diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/README.md b/registry-library/vendor/github.com/Microsoft/hcsshim/README.md index 15b39181..95c30036 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/README.md +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/README.md @@ -1,8 +1,8 @@ # hcsshim -[![Build status](https://ci.appveyor.com/api/projects/status/nbcw28mnkqml0loa/branch/master?svg=true)](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master) +[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) -This package contains the Golang interface for using the Windows [Host Compute Service](https://blogs.technet.microsoft.com/virtualization/2017/01/27/introducing-the-host-compute-service-hcs/) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS). +This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS). It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well. @@ -16,6 +16,11 @@ When you submit a pull request, a CLA-bot will automatically determine whether y a CLA and decorate the PR appropriately (e.g., label, comment). 
Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. +We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project. + + +## Code of Conduct + This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/appveyor.yml b/registry-library/vendor/github.com/Microsoft/hcsshim/appveyor.yml deleted file mode 100644 index 661bc406..00000000 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/appveyor.yml +++ /dev/null @@ -1,41 +0,0 @@ -version: 0.1.{build} - -image: Visual Studio 2017 - -clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim - -environment: - GOPATH: c:\gopath - PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH% - -stack: go 1.13.4 - -build_script: - - appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip - - 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL - - gometalinter.exe --config .gometalinter.json ./... - - go build ./cmd/containerd-shim-runhcs-v1 - - go build ./cmd/runhcs - - go build ./cmd/tar2ext4 - - go build ./cmd/wclayer - - go build ./internal/tools/grantvmgroupaccess - - go build ./internal/tools/uvmboot - - go build ./internal/tools/zapdir - - go test -v ./... -tags admin - - go test -c ./test/containerd-shim-runhcs-v1/ -tags functional - - go test -c ./test/cri-containerd/ -tags functional - - go test -c ./test/functional/ -tags functional - - go test -c ./test/runhcs/ -tags functional - -artifacts: - - path: 'containerd-shim-runhcs-v1.exe' - - path: 'runhcs.exe' - - path: 'tar2ext4.exe' - - path: 'wclayer.exe' - - path: 'grantvmgroupaccess.exe' - - path: 'uvmboot.exe' - - path: 'zapdir.exe' - - path: 'containerd-shim-runhcs-v1.test.exe' - - path: 'cri-containerd.test.exe' - - path: 'functional.test.exe' - - path: 'runhcs.test.exe' \ No newline at end of file diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go new file mode 100644 index 00000000..7f1f2823 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go @@ -0,0 +1,38 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// AttachLayerStorageFilter sets up the layer storage filter on a writable +// container layer. +// +// `layerPath` is a path to a directory the writable layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data. 
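+//
+// A minimal usage sketch; the paths and parent-layer ID here are hypothetical,
+// and LayerData and the hcsschema alias (github.com/Microsoft/hcsshim/internal/schema2)
+// are as defined in storage.go in this package:
+//
+//	parent := LayerData{
+//		SchemaVersion: hcsschema.Version{Major: 2, Minor: 1},
+//		Layers:        []hcsschema.Layer{{Id: "<parent-layer-guid>", Path: `C:\layers\base`}},
+//	}
+//	if err := AttachLayerStorageFilter(ctx, `C:\layers\sandbox`, parent); err != nil {
+//		// handle the attach failure
+//	}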
+func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) { + title := "hcsshim.AttachLayerStorageFilter" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsAttachLayerStorageFilter(layerPath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to attach layer storage filter") + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go new file mode 100644 index 00000000..8e28e6c5 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// DestroyLayer deletes a container layer. +// +// `layerPath` is a path to a directory containing the layer to export. +func DestroyLayer(ctx context.Context, layerPath string) (err error) { + title := "hcsshim.DestroyLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) + + err = hcsDestroyLayer(layerPath) + if err != nil { + return errors.Wrap(err, "failed to destroy layer") + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go new file mode 100644 index 00000000..43547325 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer. +// +// `layerPath` is a path to a directory containing the layer to export. +func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) { + title := "hcsshim.DetachLayerStorageFilter" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) + + err = hcsDetachLayerStorageFilter(layerPath) + if err != nil { + return errors.Wrap(err, "failed to detach layer storage filter") + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/export.go new file mode 100644 index 00000000..a1b12dd1 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/export.go @@ -0,0 +1,46 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// ExportLayer exports a container layer. +// +// `layerPath` is a path to a directory containing the layer to export. +// +// `exportFolderPath` is a pre-existing folder to export the layer to. 
+// +// `layerData` is the parent layer data. +// +// `options` are the export options applied to the exported layer. +func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) { + title := "hcsshim.ExportLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("exportFolderPath", exportFolderPath), + ) + + ldbytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + obytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes)) + if err != nil { + return errors.Wrap(err, "failed to export layer") + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/format.go new file mode 100644 index 00000000..83c0fa33 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/format.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer. +// +// If the VHD is not mounted it will be temporarily mounted. +func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) { + title := "hcsshim.FormatWritableLayerVhd" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + err = hcsFormatWritableLayerVhd(vhdHandle) + if err != nil { + return errors.Wrap(err, "failed to format writable layer vhd") + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go new file mode 100644 index 00000000..87fee452 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go @@ -0,0 +1,193 @@ +package computestorage + +import ( + "context" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/go-winio/pkg/security" + "github.com/Microsoft/go-winio/vhd" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +const defaultVHDXBlockSizeInMB = 1 + +// SetupContainerBaseLayer is a helper to setup a containers scratch. It +// will create and format the vhdx's inside and the size is configurable with the sizeInGB +// parameter. +// +// `layerPath` is the path to the base container layer on disk. +// +// `baseVhdPath` is the path to where the base vhdx for the base layer should be created. +// +// `diffVhdPath` is the path where the differencing disk for the base layer should be created. +// +// `sizeInGB` is the size in gigabytes to make the base vhdx. +func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { + var ( + hivesPath = filepath.Join(layerPath, "Hives") + layoutPath = filepath.Join(layerPath, "Layout") + ) + + // We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files + // already exist. `SetupBaseOSLayer` will create these files internally. 
We also remove the base and + // differencing disks if they exist in case we're asking for a different size. + if _, err := os.Stat(hivesPath); err == nil { + if err := os.RemoveAll(hivesPath); err != nil { + return errors.Wrap(err, "failed to remove preexisting hives directory") + } + } + if _, err := os.Stat(layoutPath); err == nil { + if err := os.RemoveAll(layoutPath); err != nil { + return errors.Wrap(err, "failed to remove preexisting layout file") + } + } + + if _, err := os.Stat(baseVhdPath); err == nil { + if err := os.RemoveAll(baseVhdPath); err != nil { + return errors.Wrap(err, "failed to remove base vhdx path") + } + } + if _, err := os.Stat(diffVhdPath); err == nil { + if err := os.RemoveAll(diffVhdPath); err != nil { + return errors.Wrap(err, "failed to remove differencing vhdx") + } + } + + createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil { + return err + } + // Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + options := OsLayerOptions{ + Type: OsLayerTypeContainer, + } + + // SetupBaseOSLayer expects an empty vhd handle for a container layer and will + // error out otherwise. + if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil { + return err + } + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err = security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err = security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} + +// SetupUtilityVMBaseLayer is a helper to setup a UVM's scratch space. It will create and format +// the vhdx inside and the size is configurable by the sizeInGB parameter. +// +// `uvmPath` is the path to the UtilityVM filesystem. +// +// `baseVhdPath` is the path to where the base vhdx for the UVM should be created. +// +// `diffVhdPath` is the path where the differencing disk for the UVM should be created. +// +// `sizeInGB` specifies the size in gigabytes to make the base vhdx. +func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { + // Remove the base and differencing disks if they exist in case we're asking for a different size.
+ if _, err := os.Stat(baseVhdPath); err == nil { + if err := os.RemoveAll(baseVhdPath); err != nil { + return errors.Wrap(err, "failed to remove base vhdx") + } + } + if _, err := os.Stat(diffVhdPath); err == nil { + if err := os.RemoveAll(diffVhdPath); err != nil { + return errors.Wrap(err, "failed to remove differencing vhdx") + } + } + + // Just create the vhdx for utilityVM layer, no need to format it. + createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + // If it is a UtilityVM layer then the base vhdx must be attached when calling + // `SetupBaseOSLayer` + attachParams := &vhd.AttachVirtualDiskParameters{ + Version: 2, + } + if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { + return errors.Wrapf(err, "failed to attach virtual disk") + } + + options := OsLayerOptions{ + Type: OsLayerTypeVM, + } + if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil { + return err + } + + // Detach and close the handle after setting up the layer as we don't need the handle + // for anything else and we no longer need to be attached either. + if err = vhd.DetachVirtualDisk(handle); err != nil { + return errors.Wrap(err, "failed to detach vhdx") + } + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/import.go new file mode 100644 index 00000000..0c61dab3 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/import.go @@ -0,0 +1,41 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// ImportLayer imports a container layer. +// +// `layerPath` is a path to a directory to import the layer to. If the directory +// does not exist it will be automatically created. +// +// `sourceFolderPath` is a pre-existing folder that contains the layer to +// import. +// +// `layerData` is the parent layer data.
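+//
+// A minimal usage sketch, assuming the exported layer files live in a
+// hypothetical `C:\export\layer` folder and `parent` is a LayerData value
+// describing the parent layers (see storage.go):
+//
+//	if err := ImportLayer(ctx, `C:\layers\imported`, `C:\export\layer`, parent); err != nil {
+//		// handle the import failure
+//	}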
+func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) { + title := "hcsshim.ImportLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("sourceFolderPath", sourceFolderPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to import layer") + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go new file mode 100644 index 00000000..53ed8ea6 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go @@ -0,0 +1,38 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// InitializeWritableLayer initializes a writable layer for a container. +// +// `layerPath` is a path to a directory the layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data. +func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) { + title := "hcsshim.InitializeWritableLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + // Options are not used in the platform as of RS5 + err = hcsInitializeWritableLayer(layerPath, string(bytes), "") + if err != nil { + return errors.Wrap(err, "failed to initialize container layer") + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go new file mode 100644 index 00000000..fcdbbef8 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go @@ -0,0 +1,27 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer.
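+//
+// A rough sketch of the intended use, assuming `vhdHandle` refers to an
+// attached writable-layer VHDX (for example, a handle obtained through the
+// go-winio `vhd` package vendored in this change):
+//
+//	path, err := GetLayerVhdMountPath(ctx, vhdHandle)
+//	if err != nil {
+//		// handle the lookup failure
+//	}
+//	// `path` now names the volume backing the writable layer.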
+func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) { + title := "hcsshim.GetLayerVhdMountPath" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + var mountPath *uint16 + err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath) + if err != nil { + return "", errors.Wrap(err, "failed to get vhd mount path") + } + path = interop.ConvertAndFreeCoTaskMemString(mountPath) + return path, nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go new file mode 100644 index 00000000..ca7b40ef --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go @@ -0,0 +1,74 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/osversion" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// SetupBaseOSLayer sets up a layer that contains a base OS for a container. +// +// `layerPath` is a path to a directory containing the layer. +// +// `vhdHandle` is an empty file handle of `options.Type == OsLayerTypeContainer` +// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type +// == OsLayerTypeVm`. +// +// `options` are the options applied while processing the layer. +func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) { + title := "hcsshim.SetupBaseOSLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to setup base OS layer") + } + return nil +} + +// SetupBaseOSVolume sets up a volume that contains a base OS for a container. +// +// `layerPath` is a path to a directory containing the layer. +// +// `volumePath` is the path to the volume to be used for setup. +// +// `options` are the options applied while processing the layer. 
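+//
+// An illustrative call (the layer and volume paths are hypothetical; the
+// build-number guard below means this only succeeds on Windows builds of
+// 19645 or newer):
+//
+//	opts := OsLayerOptions{Type: OsLayerTypeContainer}
+//	if err := SetupBaseOSVolume(ctx, `C:\layers\base`, `\\?\Volume{<guid>}\`, opts); err != nil {
+//		// handle the setup failure
+//	}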
+func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) { + if osversion.Get().Build < 19645 { + return errors.New("SetupBaseOSVolume is not present on builds older than 19645") + } + title := "hcsshim.SetupBaseOSVolume" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("volumePath", volumePath), + ) + + bytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to setup base OS layer") + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go new file mode 100644 index 00000000..9cd283d4 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -0,0 +1,50 @@ +// Package computestorage is a wrapper around the HCS storage APIs. These are new storage APIs introduced +// separate from the original graphdriver calls intended to give more freedom around creating +// and managing container layers and scratch spaces. +package computestorage + +import ( + hcsschema "github.com/Microsoft/hcsshim/internal/schema2" +) + +//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go + +//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? +//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? +//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestroyLayer? +//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer? +//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer? +//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter? +//sys hcsDetachLayerStorageFilter(layerPath string) (hr error) = computestorage.HcsDetachLayerStorageFilter? +//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? +//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? +//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? + +// LayerData is the data used to describe parent layer information. +type LayerData struct { + SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"` + Layers []hcsschema.Layer `json:"Layers,omitempty"` +} + +// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. +type ExportLayerOptions struct { + IsWritableLayer bool `json:"IsWritableLayer,omitempty"` +} + +// OsLayerType is the type of layer being operated on. +type OsLayerType string + +const ( + // OsLayerTypeContainer is a container layer. + OsLayerTypeContainer OsLayerType = "Container" + // OsLayerTypeVM is a virtual machine layer. + OsLayerTypeVM OsLayerType = "Vm" +) + +// OsLayerOptions are the set of options that are used with the `SetupBaseOSLayer` and +// `SetupBaseOSVolume` calls. +type OsLayerOptions struct { + Type OsLayerType `json:"Type,omitempty"` + DisableCiCacheOptimization bool `json:"DisableCiCacheOptimization,omitempty"` + SkipUpdateBcdForBoot bool `json:"SkipUpdateBcdForBoot,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go new file mode 100644 index 00000000..4f951806 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -0,0 +1,319 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package computestorage + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") + + procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") + procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") + procHcsDestroyLayer = modcomputestorage.NewProc("HcsDestroyLayer") + procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") + procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") + procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") + procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") + procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") +) + +func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsImportLayer(_p0, _p1, _p2) +} + +func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsImportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(exportFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p3 *uint16 + _p3, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsExportLayer(_p0, _p1, _p2, _p3) +} + +func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDestroyLayer(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDestroyLayer(_p0) +} + +func _hcsDestroyLayer(layerPath *uint16) (hr error) { + if hr = procHcsDestroyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDestroyLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSLayer(_p0, handle, _p1) +} + +func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(writableLayerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsInitializeWritableLayer(_p0, _p1, _p2) +} + +func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsInitializeWritableLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachLayerStorageFilter(_p0, _p1) +} + +func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsAttachLayerStorageFilter.Find();
hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDetachLayerStorageFilter(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDetachLayerStorageFilter(_p0) +} + +func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { + if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { + if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { + if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSVolume(_p0, _p1, _p2) +} + +func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSVolume.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/errors.go b/registry-library/vendor/github.com/Microsoft/hcsshim/errors.go index 63efa23c..79430867 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/errors.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/errors.go @@ -83,7 +83,6 @@ type NetworkNotFoundError = hns.NetworkNotFoundError type ProcessError struct { Process *process Operation string - ExtraInfo string Err error Events []hcs.ErrorEvent } @@ -92,7 +91,6 @@ type ProcessError struct { type ContainerError struct { Container *container Operation string - ExtraInfo string Err error Events []hcs.ErrorEvent } @@ -125,22 +123,9 @@ func (e *ContainerError) Error() string { s += "\n" + ev.String() } - if e.ExtraInfo != "" { - s += " extra info: " + e.ExtraInfo - } - return s } -func makeContainerError(container *container, operation string, extraInfo string, err error) error { - // Don't double wrap errors - if _, ok := err.(*ContainerError); ok { - return err - } - 
containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err} - return containerError -} - func (e *ProcessError) Error() string { if e == nil { return "" @@ -171,15 +156,6 @@ func (e *ProcessError) Error() string { return s } -func makeProcessError(process *process, operation string, extraInfo string, err error) error { - // Don't double wrap errors - if _, ok := err.(*ProcessError); ok { - return err - } - processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err} - return processError -} - // IsNotExist checks if an error is caused by the Container or Process not existing. // Note: Currently, ErrElementNotFound can mean that a Process has either // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist @@ -230,6 +206,18 @@ func IsNotSupported(err error) bool { return hcs.IsNotSupported(getInnerError(err)) } +// IsOperationInvalidState returns true when err is caused by +// `ErrVmcomputeOperationInvalidState`. +func IsOperationInvalidState(err error) bool { + return hcs.IsOperationInvalidState(getInnerError(err)) +} + +// IsAccessIsDenied returns true when err is caused by +// `ErrVmcomputeOperationAccessIsDenied`. +func IsAccessIsDenied(err error) bool { + return hcs.IsAccessIsDenied(getInnerError(err)) +} + func getInnerError(err error) error { switch pe := err.(type) { case nil: @@ -244,7 +232,7 @@ func getInnerError(err error) error { func convertSystemError(err error, c *container) error { if serr, ok := err.(*hcs.SystemError); ok { - return &ContainerError{Container: c, Operation: serr.Op, ExtraInfo: serr.Extra, Err: serr.Err, Events: serr.Events} + return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events} } return err } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/go.mod b/registry-library/vendor/github.com/Microsoft/hcsshim/go.mod index 72d253da..6e52f732 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/go.mod +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/go.mod @@ -3,35 +3,20 @@ module github.com/Microsoft/hcsshim go 1.13 require ( - github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 - github.com/blang/semver v3.1.0+incompatible // indirect - github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f - github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 - github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 - github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect - github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect - github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3 - github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de - github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd - github.com/gogo/protobuf v1.2.1 - github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect - github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 // indirect - github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 // indirect - github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f // indirect - github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 - github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39 - github.com/pkg/errors v0.8.1 - github.com/prometheus/procfs v0.0.5 // indirect - github.com/sirupsen/logrus v1.4.1 - github.com/syndtr/gocapability 
v0.0.0-20170704070218-db04d3cc01c8 // indirect - github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f // indirect - go.opencensus.io v0.22.0 - golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 - golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 - google.golang.org/grpc v1.20.1 - gotest.tools v2.2.0+incompatible // indirect - k8s.io/kubernetes v1.13.0 + github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 + github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 + github.com/containerd/console v1.0.1 + github.com/containerd/containerd v1.5.0-beta.4 + github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0 + github.com/containerd/ttrpc v1.0.2 + github.com/containerd/typeurl v1.0.1 + github.com/gogo/protobuf v1.3.2 + github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.7.0 + github.com/urfave/cli v1.22.2 + go.opencensus.io v0.22.3 + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a + golang.org/x/sys v0.0.0-20210324051608-47abb6519492 + google.golang.org/grpc v1.33.2 ) diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/go.sum b/registry-library/vendor/github.com/Microsoft/hcsshim/go.sum index 578b78e8..545bb60b 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/go.sum +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/go.sum @@ -1,131 +1,851 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage 
v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/blang/semver v3.1.0+incompatible h1:7hqmJYuaEK3qwVjWubYiht3j93YI0WQBuysxHIfUriU= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod 
h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod 
h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 h1:hkGVFjz+plgr5UfxZUTPFbUFIF/Km6/s+RVRIRHLrrY= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 h1:rG1clvJbgsUcmb50J82YUJhUMopWNtZvyMZjb+4fqGw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1 h1:u7SFAJyRqWcG6ogaMAx3KjSTy1e3hT9QxqX7Jco7dRc= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod 
h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4 h1:zjz4MOAOFgdBlwid2nNUlJ3YLpVi/97L36lfMYJex60= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3 h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d h1:u6sWqdNGAy7+O8qG/r1dqdnZE7IdEjteK3WGuvbfreo= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de h1:dlfGmNcE3jDAecLqwKPMNX6nk2qh1c1Vg1/YTzpOOF4= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0 h1:e+50zk22gvHLJKe8+d+xSMyA88PPQk/XfWuUw1BdnPA= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod 
h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1 h1:PvuK4E3D5S5q6IqsPDCy928FhP0LUIGcmZ/Yhgp5Djw= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= 
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= 
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket 
v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874 h1:cAv7ZbSmyb1wjn6T4TIiyFCkpcfgpbcNNC3bM2srLaI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 h1:QhPf3A2AZW3tTGvHPg0TA+CR3oHbVLlXUhlghqISp1I= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f h1:a969LJ4IQFwRHYqonHtUDMSh9i54WcKggeEkQ3fZMl4= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= 
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39 h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cobra 
v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= 
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f h1:mvXjJIHRZyhNuGassLTcXTwjiWq7NmjdavZsUnmFybQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/registry-library/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go index 09b3860a..40831267 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -40,6 +40,9 @@ func HNSListEndpointRequest() ([]HNSEndpoint, error) { // HotAttachEndpoint makes a HCS Call to attach the endpoint to the container func HotAttachEndpoint(containerID string, endpointID string) error { endpoint, err := GetHNSEndpointByID(endpointID) + if err != nil { + return err + } isAttached, err := endpoint.IsAttached(containerID) if isAttached { return err @@ -50,6 +53,9 @@ func HotAttachEndpoint(containerID string, endpointID string) error { // HotDetachEndpoint makes a HCS Call to detach the endpoint from the container func HotDetachEndpoint(containerID string, endpointID string) error { endpoint, err := GetHNSEndpointByID(endpointID) + if err != nil { + return err + } isAttached, err := endpoint.IsAttached(containerID) if !isAttached { return err diff --git 
a/registry-library/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/registry-library/vendor/github.com/Microsoft/hcsshim/hnspolicy.go index a3e03ff8..00ab2636 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/hnspolicy.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/hnspolicy.go @@ -21,8 +21,11 @@ const ( OutboundNat = hns.OutboundNat ExternalLoadBalancer = hns.ExternalLoadBalancer Route = hns.Route + Proxy = hns.Proxy ) +type ProxyPolicy = hns.ProxyPolicy + type NatPolicy = hns.NatPolicy type QosPolicy = hns.QosPolicy diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go index 8193315f..b8e2d4f3 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go @@ -80,4 +80,6 @@ type Container interface { // container to be terminated by some error condition (including calling // Close). Wait() error + // Modify sends a request to modify container resources + Modify(ctx context.Context, config interface{}) error } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go index 62ba8175..cebbe75a 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go @@ -106,6 +106,7 @@ func newSystemChannels() notificationChannels { hcsNotificationSystemStartCompleted, hcsNotificationSystemPauseCompleted, hcsNotificationSystemResumeCompleted, + hcsNotificationSystemSaveCompleted, } { channels[notif] = make(notificationChannel, 1) } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go deleted file mode 100644 index 3669c34a..00000000 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go +++ /dev/null @@ -1,7 +0,0 @@ -package hcs - -import "C" - -// This import is needed to make the library compile as CGO because HCSSHIM -// only works with CGO due to callbacks from HCS comming back from a C thread -// which is not supported without CGO. 
See https://github.com/golang/go/issues/10973 diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go index 9a4705a4..7696e4b4 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -171,7 +171,6 @@ type SystemError struct { ID string Op string Err error - Extra string Events []ErrorEvent } @@ -182,9 +181,6 @@ func (e *SystemError) Error() string { for _, ev := range e.Events { s += "\n" + ev.String() } - if e.Extra != "" { - s += "\n(extra info: " + e.Extra + ")" - } return s } @@ -198,7 +194,7 @@ func (e *SystemError) Timeout() bool { return ok && err.Timeout() } -func makeSystemError(system *System, op string, extra string, err error, events []ErrorEvent) error { +func makeSystemError(system *System, op string, err error, events []ErrorEvent) error { // Don't double wrap errors if _, ok := err.(*SystemError); ok { return err @@ -206,7 +202,6 @@ func makeSystemError(system *System, op string, extra string, err error, events return &SystemError{ ID: system.ID(), Op: op, - Extra: extra, Err: err, Events: events, } @@ -312,6 +307,13 @@ func IsOperationInvalidState(err error) bool { return err == ErrVmcomputeOperationInvalidState } +// IsAccessIsDenied returns true when err is caused by +// `ErrVmcomputeOperationAccessIsDenied`. +func IsAccessIsDenied(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationAccessIsDenied +} + func getInnerError(err error) error { switch pe := err.(type) { case nil: @@ -325,12 +327,3 @@ func getInnerError(err error) error { } return err } - -func getOperationLogResult(err error) (string, error) { - switch err { - case nil: - return "Success", nil - default: - return "Error", err - } -} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 2ad978f2..b74389ec 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -64,11 +64,7 @@ type processStatus struct { LastWaitResult int32 } -const ( - stdIn string = "StdIn" - stdOut string = "StdOut" - stdErr string = "StdErr" -) +const stdIn string = "StdIn" const ( modifyConsoleSize string = "ConsoleSize" @@ -176,8 +172,10 @@ func (process *Process) waitBackground() { trace.Int64Attribute("pid", int64(process.processID))) var ( - err error - exitCode = -1 + err error + exitCode = -1 + propertiesJSON string + resultJSON string ) err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil) @@ -190,15 +188,15 @@ func (process *Process) waitBackground() { // Make sure we didnt race with Close() here if process.handle != 0 { - propertiesJSON, resultJSON, err := vmcompute.HcsGetProcessProperties(ctx, process.handle) + propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle) events := processHcsResult(ctx, resultJSON) if err != nil { - err = makeProcessError(process, operation, err, events) + err = makeProcessError(process, operation, err, events) //nolint:ineffassign } else { properties := &processStatus{} err = json.Unmarshal([]byte(propertiesJSON), properties) if err != nil { - err = makeProcessError(process, operation, err, nil) + err = makeProcessError(process, operation, 
err, nil) //nolint:ineffassign } else { if properties.LastWaitResult != 0 { log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result") @@ -468,7 +466,7 @@ func (process *Process) unregisterCallback(ctx context.Context) error { delete(callbackMap, callbackNumber) callbackMapLock.Unlock() - handle = 0 + handle = 0 //nolint:ineffassign return nil } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go new file mode 100644 index 00000000..3a5f0125 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go @@ -0,0 +1,49 @@ +package hcs + +import ( + "context" + "encoding/json" + + hcsschema "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/vmcompute" +) + +// GetServiceProperties returns properties of the host compute service. +func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { + operation := "hcsshim::GetServiceProperties" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &hcsschema.ServiceProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, err + } + return properties, nil +} + +// ModifyServiceSettings modifies settings of the host compute service. +func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { + operation := "hcsshim::ModifyServiceSettings" + + settingsJSON, err := json.Marshal(settings) + if err != nil { + return err + } + resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return &HcsError{Op: operation, Err: err, Events: events} + } + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index 6300a797..cea11dee 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -4,12 +4,9 @@ import ( "context" "encoding/json" "errors" - "os" - "strconv" "strings" "sync" "syscall" - "time" "github.com/Microsoft/hcsshim/internal/cow" "github.com/Microsoft/hcsshim/internal/log" @@ -21,27 +18,6 @@ import ( "go.opencensus.io/trace" ) -// currentContainerStarts is used to limit the number of concurrent container -// starts. 
-var currentContainerStarts containerStarts - -type containerStarts struct { - maxParallel int - inProgress int - sync.Mutex -} - -func init() { - mpsS := os.Getenv("HCSSHIM_MAX_PARALLEL_START") - if len(mpsS) > 0 { - mpsI, err := strconv.Atoi(mpsS) - if err != nil || mpsI < 0 { - return - } - currentContainerStarts.maxParallel = mpsI - } -} - type System struct { handleLock sync.RWMutex handle vmcompute.HcsSystem @@ -52,8 +28,7 @@ type System struct { waitBlock chan struct{} waitError error exitError error - - os, typ string + os, typ string } func newSystem(id string) *System { @@ -98,8 +73,8 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in if err = computeSystem.registerCallback(ctx); err != nil { // Terminate the compute system if it still exists. We're okay to // ignore a failure here. - computeSystem.Terminate(ctx) - return nil, makeSystemError(computeSystem, operation, "", err, nil) + _ = computeSystem.Terminate(ctx) + return nil, makeSystemError(computeSystem, operation, err, nil) } } @@ -108,9 +83,9 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in if err == ErrTimeout { // Terminate the compute system if it still exists. We're okay to // ignore a failure here. - computeSystem.Terminate(ctx) + _ = computeSystem.Terminate(ctx) } - return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events) + return nil, makeSystemError(computeSystem, operation, err, events) } go computeSystem.waitBackground() if err = computeSystem.getCachedProperties(ctx); err != nil { @@ -127,7 +102,7 @@ func OpenComputeSystem(ctx context.Context, id string) (*System, error) { handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) events := processHcsResult(ctx, resultJSON) if err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, events) + return nil, makeSystemError(computeSystem, operation, err, events) } computeSystem.handle = handle defer func() { @@ -136,7 +111,7 @@ func OpenComputeSystem(ctx context.Context, id string) (*System, error) { } }() if err = computeSystem.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } go computeSystem.waitBackground() if err = computeSystem.getCachedProperties(ctx); err != nil { @@ -212,39 +187,13 @@ func (computeSystem *System) Start(ctx context.Context) (err error) { defer computeSystem.handleLock.RUnlock() if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil) - } - - // This is a very simple backoff-retry loop to limit the number - // of parallel container starts if environment variable - // HCSSHIM_MAX_PARALLEL_START is set to a positive integer. - // It should generally only be used as a workaround to various - // platform issues that exist between RS1 and RS4 as of Aug 2018 - if currentContainerStarts.maxParallel > 0 { - for { - currentContainerStarts.Lock() - if currentContainerStarts.inProgress < currentContainerStarts.maxParallel { - currentContainerStarts.inProgress++ - currentContainerStarts.Unlock() - break - } - if currentContainerStarts.inProgress == currentContainerStarts.maxParallel { - currentContainerStarts.Unlock() - time.Sleep(100 * time.Millisecond) - } - } - // Make sure we decrement the count when we are done. 
- defer func() { - currentContainerStarts.Lock() - currentContainerStarts.inProgress-- - currentContainerStarts.Unlock() - }() + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) if err != nil { - return makeSystemError(computeSystem, operation, "", err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil @@ -271,7 +220,7 @@ func (computeSystem *System) Shutdown(ctx context.Context) error { switch err { case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: default: - return makeSystemError(computeSystem, operation, "", err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil } @@ -292,7 +241,7 @@ func (computeSystem *System) Terminate(ctx context.Context) error { switch err { case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: default: - return makeSystemError(computeSystem, operation, "", err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil } @@ -314,10 +263,10 @@ func (computeSystem *System) waitBackground() { log.G(ctx).Debug("system exited") case ErrVmcomputeUnexpectedExit: log.G(ctx).Debug("unexpected system exit") - computeSystem.exitError = makeSystemError(computeSystem, operation, "", err, nil) + computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) err = nil default: - err = makeSystemError(computeSystem, operation, "", err, nil) + err = makeSystemError(computeSystem, operation, err, nil) } computeSystem.closedWaitOnce.Do(func() { computeSystem.waitError = err @@ -355,13 +304,13 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) if err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) events := processHcsResult(ctx, resultJSON) if err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, events) + return nil, makeSystemError(computeSystem, operation, err, events) } if propertiesJSON == "" { @@ -369,7 +318,7 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr } properties := &schema1.ContainerProperties{} if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } return properties, nil @@ -384,13 +333,13 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) if err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) events := processHcsResult(ctx, resultJSON) if err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, events) + return nil, 
makeSystemError(computeSystem, operation, err, events) } if propertiesJSON == "" { @@ -398,7 +347,7 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem } properties := &hcsschema.Properties{} if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } return properties, nil @@ -419,13 +368,13 @@ func (computeSystem *System) Pause(ctx context.Context) (err error) { defer computeSystem.handleLock.RUnlock() if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil) + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) if err != nil { - return makeSystemError(computeSystem, operation, "", err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil @@ -446,13 +395,45 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) { defer computeSystem.handleLock.RUnlock() if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil) + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) if err != nil { - return makeSystemError(computeSystem, operation, "", err, events) + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// Save the compute system +func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { + operation := "hcsshim::System::Save" + + // hcsSaveComputeSystemContext is an async operation. Start the outer span + // here to measure the full save time. 
+ ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + saveOptions, err := json.Marshal(options) + if err != nil { + return err + } + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) + events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) } return nil @@ -463,19 +444,19 @@ func (computeSystem *System) createProcess(ctx context.Context, operation string defer computeSystem.handleLock.RUnlock() if computeSystem.handle == 0 { - return nil, nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil) + return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } configurationb, err := json.Marshal(c) if err != nil { - return nil, nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, nil, makeSystemError(computeSystem, operation, err, nil) } configuration := string(configurationb) processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration) events := processHcsResult(ctx, resultJSON) if err != nil { - return nil, nil, makeSystemError(computeSystem, operation, configuration, err, events) + return nil, nil, makeSystemError(computeSystem, operation, err, events) } log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid") @@ -497,7 +478,7 @@ func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) ( pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) if err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } process.stdin = pipes[0] process.stdout = pipes[1] @@ -505,7 +486,7 @@ func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) ( process.hasCachedStdio = true if err = process.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } go process.waitBackground() @@ -520,18 +501,18 @@ func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process operation := "hcsshim::System::OpenProcess" if computeSystem.handle == 0 { - return nil, makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil) + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) events := processHcsResult(ctx, resultJSON) if err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, events) + return nil, makeSystemError(computeSystem, operation, err, events) } process := newProcess(processHandle, pid, computeSystem) if err = process.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, "", err, nil) + return nil, makeSystemError(computeSystem, operation, err, nil) } go process.waitBackground() @@ -555,12 +536,12 @@ func (computeSystem *System) 
Close() (err error) { } if err = computeSystem.unregisterCallback(ctx); err != nil { - return makeSystemError(computeSystem, operation, "", err, nil) + return makeSystemError(computeSystem, operation, err, nil) } err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle) if err != nil { - return makeSystemError(computeSystem, operation, "", err, nil) + return makeSystemError(computeSystem, operation, err, nil) } computeSystem.handle = 0 @@ -624,7 +605,7 @@ func (computeSystem *System) unregisterCallback(ctx context.Context) error { delete(callbackMap, callbackNumber) callbackMapLock.Unlock() - handle = 0 + handle = 0 //nolint:ineffassign return nil } @@ -637,7 +618,7 @@ func (computeSystem *System) Modify(ctx context.Context, config interface{}) err operation := "hcsshim::System::Modify" if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, "", ErrAlreadyClosed, nil) + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } requestBytes, err := json.Marshal(config) @@ -649,7 +630,7 @@ func (computeSystem *System) Modify(ctx context.Context, config interface{}) err resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) events := processHcsResult(ctx, resultJSON) if err != nil { - return makeSystemError(computeSystem, operation, requestJSON, err, events) + return makeSystemError(computeSystem, operation, err, events) } return nil diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go index a638677e..3342e5bb 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -1,10 +1,15 @@ package hcs import ( + "context" "io" "syscall" "github.com/Microsoft/go-winio" + diskutil "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/computestorage" + "github.com/pkg/errors" + "golang.org/x/sys/windows" ) // makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles @@ -31,3 +36,27 @@ func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { } return fs, nil } + +// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
+func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { + if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { + return errors.Wrap(err, "failed to create VHD") + } + + vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) + if err != nil { + return errors.Wrap(err, "failed to open VHD") + } + defer func() { + err2 := windows.CloseHandle(windows.Handle(vhd)) + if err == nil { + err = errors.Wrap(err2, "failed to close VHD") + } + }() + + if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { + return errors.Wrap(err, "failed to format VHD") + } + + return nil +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go index f07f532c..db4e14fd 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -65,5 +65,4 @@ func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNo case <-c: return ErrTimeout } - return nil } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go index 6a1c41e1..b36315a3 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -17,12 +17,15 @@ type HNSEndpoint struct { Policies []json.RawMessage `json:",omitempty"` MacAddress string `json:",omitempty"` IPAddress net.IP `json:",omitempty"` + IPv6Address net.IP `json:",omitempty"` DNSSuffix string `json:",omitempty"` DNSServerList string `json:",omitempty"` GatewayAddress string `json:",omitempty"` + GatewayAddressV6 string `json:",omitempty"` EnableInternalDNS bool `json:",omitempty"` DisableICC bool `json:",omitempty"` PrefixLength uint8 `json:",omitempty"` + IPv6PrefixLength uint8 `json:",omitempty"` IsRemoteEndpoint bool `json:",omitempty"` EnableLowMetric bool `json:",omitempty"` Namespace *Namespace `json:",omitempty"` @@ -173,6 +176,27 @@ func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { return err } +// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error { + operation := "ApplyProxyPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + // ContainerAttach attaches an endpoint to container func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { operation := "ContainerAttach" diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go index b7ae96fd..f12d3ab0 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go @@ -39,12 +39,6 @@ type HNSNetwork struct { AutomaticDNS bool 
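The new CreateNTFSVHD helper above creates a VHDX, opens it, formats it as an NTFS writable-layer volume via computestorage, and closes the handle even on the error path. A usage sketch, assuming a caller inside the hcsshim module (internal packages are not importable from outside it); the path and size are illustrative:

//go:build windows

package main

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

func main() {
	// Create a 10 GB NTFS-formatted VHDX at an illustrative path.
	if err := hcs.CreateNTFSVHD(context.Background(), `C:\layers\scratch.vhdx`, 10); err != nil {
		log.Fatal(err)
	}
}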
`json:",omitempty"` } -type hnsNetworkResponse struct { - Success bool - Error string - Output HNSNetwork -} - type hnsResponse struct { Success bool Error string diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go index 61da242e..6765aaea 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -17,6 +17,7 @@ const ( OutboundNat PolicyType = "OutBoundNAT" ExternalLoadBalancer PolicyType = "ELB" Route PolicyType = "ROUTE" + Proxy PolicyType = "PROXY" ) type NatPolicy struct { @@ -60,6 +61,15 @@ type OutboundNatPolicy struct { Destinations []string `json:",omitempty"` } +type ProxyPolicy struct { + Type PolicyType `json:"Type"` + IP string `json:",omitempty"` + Port string `json:",omitempty"` + ExceptionList []string `json:",omitempty"` + Destination string `json:",omitempty"` + OutboundNat bool `json:",omitempty"` +} + type ActionType string type DirectionType string type RuleType string diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go index 45e2281b..d3b04eef 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -27,9 +27,10 @@ type namespaceResourceRequest struct { } type Namespace struct { - ID string - IsDefault bool `json:",omitempty"` - ResourceList []NamespaceResource `json:",omitempty"` + ID string + IsDefault bool `json:",omitempty"` + ResourceList []NamespaceResource `json:",omitempty"` + CompartmentId uint32 `json:",omitempty"` } func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go index f31edfaf..66b8d7e0 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -11,72 +11,11 @@ import ( "unsafe" "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/winapi" winio "github.com/Microsoft/go-winio" ) -//go:generate go run $GOROOT\src\syscall\mksyscall_windows.go -output zsyscall_windows.go safeopen.go - -//sys ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile -//sys ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile -//sys rtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys localAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc -//sys localFree(ptr uintptr) = kernel32.LocalFree - -type ioStatusBlock struct { - Status, Information uintptr -} - -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName uintptr - Attributes uintptr - SecurityDescriptor uintptr - SecurityQoS uintptr -} - -type 
unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -type fileLinkInformation struct { - ReplaceIfExists bool - RootDirectory uintptr - FileNameLength uint32 - FileName [1]uint16 -} - -type fileDispositionInformationEx struct { - Flags uintptr -} - -const ( - _FileLinkInformation = 11 - _FileDispositionInformationEx = 64 - - FILE_READ_ATTRIBUTES = 0x0080 - FILE_WRITE_ATTRIBUTES = 0x0100 - DELETE = 0x10000 - - FILE_OPEN = 1 - FILE_CREATE = 2 - - FILE_DIRECTORY_FILE = 0x00000001 - FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 - FILE_DELETE_ON_CLOSE = 0x00001000 - FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 - FILE_OPEN_REPARSE_POINT = 0x00200000 - - FILE_DISPOSITION_DELETE = 0x00000001 - - _OBJ_DONT_REPARSE = 0x1000 - - _STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B -) - func OpenRoot(path string) (*os.File, error) { longpath, err := longpath.LongAbs(path) if err != nil { @@ -85,16 +24,24 @@ func OpenRoot(path string) (*os.File, error) { return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) } -func ntRelativePath(path string) ([]uint16, error) { +func cleanGoStringRelativePath(path string) (string, error) { path = filepath.Clean(path) if strings.Contains(path, ":") { // Since alternate data streams must follow the file they // are attached to, finding one here (out of order) is invalid. - return nil, errors.New("path contains invalid character `:`") + return "", errors.New("path contains invalid character `:`") } fspath := filepath.FromSlash(path) if len(fspath) > 0 && fspath[0] == '\\' { - return nil, errors.New("expected relative path") + return "", errors.New("expected relative path") + } + return fspath, nil +} + +func ntRelativePath(path string) ([]uint16, error) { + fspath, err := cleanGoStringRelativePath(path) + if err != nil { + return nil, err } path16 := utf16.Encode(([]rune)(fspath)) @@ -110,11 +57,11 @@ func ntRelativePath(path string) ([]uint16, error) { func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { var ( h uintptr - iosb ioStatusBlock - oa objectAttributes + iosb winapi.IOStatusBlock + oa winapi.ObjectAttributes ) - path16, err := ntRelativePath(path) + cleanRelativePath, err := cleanGoStringRelativePath(path) if err != nil { return nil, err } @@ -123,20 +70,16 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl return nil, errors.New("missing root directory") } - upathBuffer := localAlloc(0, int(unsafe.Sizeof(unicodeString{}))+len(path16)*2) - defer localFree(upathBuffer) - - upath := (*unicodeString)(unsafe.Pointer(upathBuffer)) - upath.Length = uint16(len(path16) * 2) - upath.MaximumLength = upath.Length - upath.Buffer = upathBuffer + unsafe.Sizeof(*upath) - copy((*[32768]uint16)(unsafe.Pointer(upath.Buffer))[:], path16) + pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath) + if err != nil { + return nil, err + } oa.Length = unsafe.Sizeof(oa) - oa.ObjectName = upathBuffer + oa.ObjectName = pathUnicode oa.RootDirectory = uintptr(root.Fd()) - oa.Attributes = _OBJ_DONT_REPARSE - status := ntCreateFile( + oa.Attributes = winapi.OBJ_DONT_REPARSE + status := winapi.NtCreateFile( &h, accessMask|syscall.SYNCHRONIZE, &oa, @@ -145,12 +88,12 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl 0, shareFlags, createDisposition, - 
FILE_OPEN_FOR_BACKUP_INTENT|FILE_SYNCHRONOUS_IO_NONALERT|flags, + winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags, nil, 0, ) if status != 0 { - return nil, rtlNtStatusToDosError(status) + return nil, winapi.RtlNtStatusToDosError(status) } fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) @@ -182,7 +125,7 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. oldroot, syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, + winapi.FILE_OPEN, 0, ) if err != nil { @@ -199,8 +142,8 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. newroot, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, - FILE_DIRECTORY_FILE) + winapi.FILE_OPEN, + winapi.FILE_DIRECTORY_FILE) if err != nil { return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} } @@ -211,7 +154,7 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. return err } if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: rtlNtStatusToDosError(_STATUS_REPARSE_POINT_ENCOUNTERED)} + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} } } else { @@ -227,24 +170,25 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. return err } - size := int(unsafe.Offsetof(fileLinkInformation{}.FileName)) + len(newbase16)*2 - linkinfoBuffer := localAlloc(0, size) - defer localFree(linkinfoBuffer) - linkinfo := (*fileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) + size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2 + linkinfoBuffer := winapi.LocalAlloc(0, size) + defer winapi.LocalFree(linkinfoBuffer) + + linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) linkinfo.RootDirectory = parent.Fd() linkinfo.FileNameLength = uint32(len(newbase16) * 2) - copy((*[32768]uint16)(unsafe.Pointer(&linkinfo.FileName[0]))[:], newbase16) + copy(winapi.Uint16BufferToSlice(&linkinfo.FileName[0], len(newbase16)), newbase16) - var iosb ioStatusBlock - status := ntSetInformationFile( + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( oldf.Fd(), &iosb, linkinfoBuffer, uint32(size), - _FileLinkInformation, + winapi.FileLinkInformationClass, ) if status != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: rtlNtStatusToDosError(status)} + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)} } return nil @@ -252,17 +196,17 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. // deleteOnClose marks a file to be deleted when the handle is closed. 
func deleteOnClose(f *os.File) error { - disposition := fileDispositionInformationEx{Flags: FILE_DISPOSITION_DELETE} - var iosb ioStatusBlock - status := ntSetInformationFile( + disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE} + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( f.Fd(), &iosb, uintptr(unsafe.Pointer(&disposition)), uint32(unsafe.Sizeof(disposition)), - _FileDispositionInformationEx, + winapi.FileDispositionInformationExClass, ) if status != 0 { - return rtlNtStatusToDosError(status) + return winapi.RtlNtStatusToDosError(status) } return nil } @@ -291,16 +235,16 @@ func RemoveRelative(path string, root *os.File) error { f, err := openRelativeInternal( path, root, - FILE_READ_ATTRIBUTES|FILE_WRITE_ATTRIBUTES|DELETE, + winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, - FILE_OPEN_REPARSE_POINT) + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) if err == nil { defer f.Close() err = deleteOnClose(f) if err == syscall.ERROR_ACCESS_DENIED { // Maybe the file is marked readonly. Clear the bit and retry. - clearReadOnly(f) + _ = clearReadOnly(f) err = deleteOnClose(f) } } @@ -385,8 +329,8 @@ func MkdirRelative(path string, root *os.File) error { root, 0, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_CREATE, - FILE_DIRECTORY_FILE) + winapi.FILE_CREATE, + winapi.FILE_DIRECTORY_FILE) if err == nil { f.Close() } else { @@ -401,10 +345,10 @@ func LstatRelative(path string, root *os.File) (os.FileInfo, error) { f, err := openRelativeInternal( path, root, - FILE_READ_ATTRIBUTES, + winapi.FILE_READ_ATTRIBUTES, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, - FILE_OPEN_REPARSE_POINT) + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) if err != nil { return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} } @@ -421,7 +365,7 @@ func EnsureNotReparsePointRelative(path string, root *os.File) error { root, 0, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - FILE_OPEN, + winapi.FILE_OPEN, 0) if err != nil { return err diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go deleted file mode 100644 index 709b9d34..00000000 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go +++ /dev/null @@ -1,79 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package safefile - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
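The safeopen.go changes above are a pure refactor: the hand-rolled NT syscall stubs, structs, and FILE_* constants move to the shared internal/winapi package (hence the deleted generated zsyscall_windows.go that follows), and UNICODE_STRING construction goes through winapi.NewUnicodeString instead of manual LocalAlloc pointer arithmetic. The exported surface is unchanged, so existing callers compile as before. A sketch under that assumption; the paths are illustrative:

//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/safefile"
)

func main() {
	root, err := safefile.OpenRoot(`C:\layers\base`)
	if err != nil {
		log.Fatal(err)
	}
	defer root.Close()

	// Remove a file relative to the opened root; openRelativeInternal
	// refuses reparse points, so the operation cannot escape the tree.
	if err := safefile.RemoveRelative(`Files\stale.txt`, root); err != nil {
		log.Fatal(err)
	}
}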
- return e -} - -var ( - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procNtCreateFile = modntdll.NewProc("NtCreateFile") - procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") -) - -func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) - status = uint32(r0) - return -} - -func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) - status = uint32(r0) - return -} - -func rtlNtStatusToDosError(status uint32) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func localAlloc(flags uint32, size int) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) - ptr = uintptr(r0) - return -} - -func localFree(ptr uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) - return -} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go index fb23617f..b468ad63 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go @@ -119,9 +119,9 @@ type PropertyType string const ( PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 - PropertyTypeProcessList = "ProcessList" // V1 and V2 - PropertyTypeMappedVirtualDisk = "MappedVirtualDisk" // Not supported in V2 schema call - PropertyTypeGuestConnection = "GuestConnection" // V1 and V2. Nil return from HCS before RS5 + PropertyTypeProcessList PropertyType = "ProcessList" // V1 and V2 + PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call + PropertyTypeGuestConnection PropertyType = "GuestConnection" // V1 and V2. 
Nil return from HCS before RS5 ) type PropertyQuery struct { @@ -214,9 +214,11 @@ type MappedVirtualDiskController struct { // GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM type GuestDefinedCapabilities struct { - NamespaceAddRequestSupported bool `json:",omitempty"` - SignalProcessSupported bool `json:",omitempty"` - DumpStacksSupported bool `json:",omitempty"` + NamespaceAddRequestSupported bool `json:",omitempty"` + SignalProcessSupported bool `json:",omitempty"` + DumpStacksSupported bool `json:",omitempty"` + DeleteContainerStateSupported bool `json:",omitempty"` + UpdateContainerSupported bool `json:",omitempty"` } // GuestConnectionInfo is the structure of an iterm return by a GuestConnection call on a utility VM diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_add_instance_request.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_add_instance_request.go new file mode 100644 index 00000000..495c6ebc --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_add_instance_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardAddInstanceRequest struct { + Id string `json:"Id,omitempty"` + CredentialSpec string `json:"CredentialSpec,omitempty"` + Transport string `json:"Transport,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_hv_socket_service_config.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_hv_socket_service_config.go new file mode 100644 index 00000000..1ed4c008 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_hv_socket_service_config.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardHvSocketServiceConfig struct { + ServiceId string `json:"ServiceId,omitempty"` + ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_instance.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_instance.go new file mode 100644 index 00000000..d7ebd0fc --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_instance.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardInstance struct { + Id string `json:"Id,omitempty"` + CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"` + HvSocketConfig 
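The schema1.go hunk above fixes a classic Go const-block pitfall: previously only PropertyTypeStatistics carried the PropertyType type, while the constants after it supplied a value but no type and therefore defaulted to untyped string constants. A self-contained demonstration of the difference:

package main

import "fmt"

type PropertyType string

const (
	// Only the first constant is explicitly typed; the second supplies a
	// value without a type, so it is an untyped constant whose default
	// type is plain string.
	PropertyTypeStatistics  PropertyType = "Statistics"
	PropertyTypeProcessList              = "ProcessList"

	// The fixed form repeats the type on every constant.
	PropertyTypeGuestConnection PropertyType = "GuestConnection"
)

func main() {
	fmt.Printf("%T\n", PropertyTypeProcessList)     // string
	fmt.Printf("%T\n", PropertyTypeGuestConnection) // main.PropertyType
}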
*ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_modify_operation.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_modify_operation.go new file mode 100644 index 00000000..71005b09 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_modify_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardModifyOperation string + +const ( + AddInstance ContainerCredentialGuardModifyOperation = "AddInstance" + RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance" +) diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_operation_request.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_operation_request.go new file mode 100644 index 00000000..952cda49 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_operation_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardOperationRequest struct { + Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_remove_instance_request.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_remove_instance_request.go new file mode 100644 index 00000000..32e5a3be --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_remove_instance_request.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardRemoveInstanceRequest struct { + Id string `json:"Id,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_system_info.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_system_info.go new file mode 100644 index 00000000..ea306fa2 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_system_info.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardSystemInfo struct { + Instances 
[]ContainerCredentialGuardInstance `json:"Instances,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group.go new file mode 100644 index 00000000..90332a51 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU groups allow Hyper-V administrators to better manage and allocate the host's CPU resources across guest virtual machines +type CpuGroup struct { + Id string `json:"Id,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_affinity.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_affinity.go new file mode 100644 index 00000000..8794961b --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_affinity.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupAffinity struct { + LogicalProcessorCount int32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []int32 `json:"LogicalProcessors,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_config.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_config.go new file mode 100644 index 00000000..f1a28cd3 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_config.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupConfig struct { + GroupId string `json:"GroupId,omitempty"` + Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` + GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` + // Hypervisor CPU group IDs exposed to clients + HypervisorGroupId int32 `json:"HypervisorGroupId,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_configurations.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_configurations.go new file mode 100644 index 00000000..3ace0ccc --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_configurations.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to return cpu groups for a Service property query +type CpuGroupConfigurations struct { + CpuGroups []CpuGroupConfig `json:"CpuGroups,omitempty"` +} diff --git 
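The new hcsschema credential-guard types above compose into a single operation request: a modify-operation enum plus operation-specific details. A sketch showing the shapes fitting together (the import path follows the vendored layout; the Id, CredentialSpec, and Transport values are all illustrative):

package main

import (
	"encoding/json"
	"fmt"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

func main() {
	req := hcsschema.ContainerCredentialGuardOperationRequest{
		Operation: hcsschema.AddInstance,
		OperationDetails: hcsschema.ContainerCredentialGuardAddInstanceRequest{
			Id:             "00000000-0000-0000-0000-000000000001", // illustrative
			CredentialSpec: `{"CmsPlugins":["ActiveDirectory"]}`,   // illustrative
			Transport:      "LRPC",                                 // illustrative
		},
	}
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
}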
a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_operations.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_operations.go new file mode 100644 index 00000000..7d897807 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_operations.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CPUGroupOperation string + +const ( + CreateGroup CPUGroupOperation = "CreateGroup" + DeleteGroup CPUGroupOperation = "DeleteGroup" + SetProperty CPUGroupOperation = "SetProperty" +) diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_property.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_property.go new file mode 100644 index 00000000..bbad6a2c --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/cpu_group_property.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupProperty struct { + PropertyCode uint32 `json:"PropertyCode,omitempty"` + PropertyValue uint32 `json:"PropertyValue,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/create_group_operation.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/create_group_operation.go new file mode 100644 index 00000000..91a8278f --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/create_group_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Create group operation settings +type CreateGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []uint32 `json:"LogicalProcessors,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/delete_group_operation.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/delete_group_operation.go new file mode 100644 index 00000000..134bd988 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/delete_group_operation.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Delete group operation settings +type DeleteGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go index ca319bbb..107cadda 100644 --- 
a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go @@ -9,8 +9,19 @@ package hcsschema -type Device struct { +type DeviceType string + +const ( + ClassGUID DeviceType = "ClassGuid" + DeviceInstance DeviceType = "DeviceInstance" + GPUMirror DeviceType = "GpuMirror" +) - // The interface class guid of the device to assign to container. +type Device struct { + // The type of device to assign to the container. + Type DeviceType `json:"Type,omitempty"` + // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` + // The location path of the device to assign to the container. Only used when Type is DeviceInstance. + LocationPath string `json:"LocationPath,omitempty"` } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go index 781a8840..e985d96d 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go @@ -39,4 +39,8 @@ type Devices struct { FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` + + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + VirtualPci map[string]VirtualPciDevice `json:",omitempty"` } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/host_processor_modify_request.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/host_processor_modify_request.go new file mode 100644 index 00000000..2238ce53 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/host_processor_modify_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to request a service processor modification +type HostProcessorModificationRequest struct { + Operation CPUGroupOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_address.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_address.go new file mode 100644 index 00000000..84c11b93 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_address.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This class defines address settings applied to a VM +// by the GCS every time a VM starts or restores. 
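Together with the cpu_group_* files above, HostProcessorModificationRequest gives host CPU-group management a typed request shape: a CPUGroupOperation plus matching operation details. A sketch composing a create-group request; the GUID and processor indices are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

func main() {
	req := hcsschema.HostProcessorModificationRequest{
		Operation: hcsschema.CreateGroup,
		OperationDetails: hcsschema.CreateGroupOperation{
			GroupId:               "f2e63325-2f22-4dd2-9b64-883e39c91c57", // illustrative
			LogicalProcessorCount: 2,
			LogicalProcessors:     []uint32{0, 1},
		},
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
}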
+type HvSocketAddress struct { + LocalAddress string `json:"LocalAddress,omitempty"` + ParentAddress string `json:"ParentAddress,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go index a848e91e..ecd9f7fb 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go @@ -19,4 +19,10 @@ type HvSocketServiceConfig struct { // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` + + // Disabled controls whether the HvSocket service is accepting connection requests. + // This set to true will make the service refuse all incoming connections as well as cancel + // any connections already established. The service itself will still be active however + // and can be re-enabled at a future time. + Disabled bool `json:"Disabled,omitempty"` } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/interrupt_moderation_mode.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/interrupt_moderation_mode.go new file mode 100644 index 00000000..a614d63b --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/interrupt_moderation_mode.go @@ -0,0 +1,42 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterruptModerationName string + +// The valid interrupt moderation modes for I/O virtualization (IOV) offloading. 
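The Disabled flag added to HvSocketServiceConfig above lets a service refuse new connections and cancel established ones while staying configured, so it can be re-enabled later without re-registering. A sketch of the toggle as it would appear in a modification payload (how the payload is delivered is up to the caller); only fields shown in this diff are set:

package main

import (
	"encoding/json"
	"fmt"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

func main() {
	cfg := hcsschema.HvSocketServiceConfig{
		AllowWildcardBinds: true,
		Disabled:           true, // refuse and cancel connections, keep the service configured
	}
	b, _ := json.Marshal(cfg)
	fmt.Println(string(b))
}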
+const ( + DefaultName InterruptModerationName = "Default" + AdaptiveName InterruptModerationName = "Adaptive" + OffName InterruptModerationName = "Off" + LowName InterruptModerationName = "Low" + MediumName InterruptModerationName = "Medium" + HighName InterruptModerationName = "High" +) + +type InterruptModerationValue uint32 + +const ( + DefaultValue InterruptModerationValue = iota + AdaptiveValue + OffValue + LowValue InterruptModerationValue = 100 + MediumValue InterruptModerationValue = 200 + HighValue InterruptModerationValue = 300 +) + +var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ + DefaultValue: DefaultName, + AdaptiveValue: AdaptiveName, + OffValue: OffName, + LowValue: LowName, + MediumValue: MediumName, + HighValue: HighName, +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/iov_settings.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/iov_settings.go new file mode 100644 index 00000000..2a55cc37 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/iov_settings.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type IovSettings struct { + // The weight assigned to this port for I/O virtualization (IOV) offloading. + // Setting this to 0 disables IOV offloading. + OffloadWeight *uint32 `json:"OffloadWeight,omitempty"` + + // The number of queue pairs requested for this port for I/O virtualization (IOV) offloading. + QueuePairsRequested *uint32 `json:"QueuePairsRequested,omitempty"` + + // The interrupt moderation mode for I/O virtualization (IOV) offloading. 
+ InterruptModeration *InterruptModerationName `json:"InterruptModeration,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/logical_processor.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/logical_processor.go new file mode 100644 index 00000000..2e3aa5e1 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/logical_processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LogicalProcessor struct { + LpIndex uint32 `json:"LpIndex,omitempty"` + NodeNumber uint8 `json:"NodeNumber,omitempty"` + PackageId uint32 `json:"PackageId,omitempty"` + CoreId uint32 `json:"CoreId,omitempty"` + RootVpIndex int32 `json:"RootVpIndex,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go index ec93d004..30749c67 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go @@ -10,5 +10,5 @@ package hcsschema type Memory struct { - SizeInMB int32 `json:"SizeInMB,omitempty"` + SizeInMB uint64 `json:"SizeInMB,omitempty"` } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go index b4a36954..71224c75 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go @@ -10,7 +10,7 @@ package hcsschema type Memory2 struct { - SizeInMB int32 `json:"SizeInMB,omitempty"` + SizeInMB uint64 `json:"SizeInMB,omitempty"` AllowOvercommit bool `json:"AllowOvercommit,omitempty"` @@ -27,4 +27,23 @@ type Memory2 struct { // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by // the guest operating system). EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` + + // LowMmioGapInMB is the low MMIO region allocated below 4GB. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` + + // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and + // size). + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` + + // HighMmioGapInMB is the high MMIO region. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. 
+ HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/modification_request.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/modification_request.go new file mode 100644 index 00000000..1384ed88 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/modification_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModificationRequest struct { + PropertyType PropertyType `json:"PropertyType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go index a9c750b3..7408abd3 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go @@ -11,6 +11,7 @@ package hcsschema type NetworkAdapter struct { EndpointId string `json:"EndpointId,omitempty"` - MacAddress string `json:"MacAddress,omitempty"` + // The I/O virtualization (IOV) offloading configuration. + IovSettings *IovSettings `json:"IovSettings,omitempty"` } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_topology.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_topology.go new file mode 100644 index 00000000..885156e7 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_topology.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessorTopology struct { + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_type.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_type.go index f092b737..98f2c96e 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_type.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_type.go @@ -18,6 +18,9 @@ const ( PTProcessList PropertyType = "ProcessList" PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed" PTSharedMemoryRegion PropertyType = "SharedMemoryRegion" + PTContainerCredentialGuard PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually. 
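NetworkAdapter above gains an optional IovSettings block, tying together the IovSettings and InterruptModerationName types added earlier. A sketch requesting adaptive interrupt moderation with an offload weight; the endpoint ID and values are illustrative, and the pointer fields exist to distinguish "unset" from an explicit zero:

package main

import (
	"encoding/json"
	"fmt"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

func main() {
	weight := uint32(100) // 0 would disable IOV offloading
	queues := uint32(1)
	mode := hcsschema.AdaptiveName

	nic := hcsschema.NetworkAdapter{
		EndpointId: "example-endpoint-id", // illustrative
		IovSettings: &hcsschema.IovSettings{
			OffloadWeight:       &weight,
			QueuePairsRequested: &queues,
			InterruptModeration: &mode,
		},
	}
	b, _ := json.Marshal(nic)
	fmt.Println(string(b))
}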
PTGuestConnection PropertyType = "GuestConnection" PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" + PTProcessorTopology PropertyType = "ProcessorTopology" + PTCPUGroup PropertyType = "CpuGroup" ) diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/service_properties.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/service_properties.go new file mode 100644 index 00000000..b8142ca6 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/service_properties.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "encoding/json" + +type ServiceProperties struct { + // Changed Properties field to []json.RawMessage from []interface{} to avoid having to + // remarshal sp.Properties[n] and unmarshal into the type(s) we want. + Properties []json.RawMessage `json:"Properties,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_device.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_device.go new file mode 100644 index 00000000..f5e05903 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_device.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. +type VirtualPciDevice struct { + Functions []VirtualPciFunction `json:",omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_function.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_function.go new file mode 100644 index 00000000..cedb7d18 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_pci_function.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. 
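ServiceProperties above deliberately uses []json.RawMessage so each property can be decoded once, directly into the concrete type the caller expects, instead of unmarshalling into []interface{} and remarshalling. A sketch decoding a CPU-group property; the payload is illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

func main() {
	payload := []byte(`{"Properties":[{"CpuGroups":[{"GroupId":"group-0"}]}]}`) // illustrative
	var sp hcsschema.ServiceProperties
	if err := json.Unmarshal(payload, &sp); err != nil {
		log.Fatal(err)
	}
	// Decode the raw property straight into the expected concrete type.
	var groups hcsschema.CpuGroupConfigurations
	if err := json.Unmarshal(sp.Properties[0], &groups); err != nil {
		log.Fatal(err)
	}
	fmt.Println(groups.CpuGroups[0].GroupId) // group-0
}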
+type VirtualPciFunction struct { + DeviceInstancePath string `json:",omitempty"` + + VirtualFunction uint16 `json:",omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_processor_limits.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_processor_limits.go new file mode 100644 index 00000000..de1b9cf1 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_processor_limits.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. +type ProcessorLimits struct { + // Maximum amount of host CPU resources that the virtual machine can use. + Limit uint64 `json:"Limit,omitempty"` + // Value describing the relative priority of this virtual machine compared to other virtual machines. + Weight uint64 `json:"Weight,omitempty"` + // Minimum amount of host CPU resources that the virtual machine is guaranteed. + Reservation uint64 `json:"Reservation,omitempty"` + // Provides the target maximum CPU frequency, in MHz, for a virtual machine. + MaximumFrequencyMHz uint32 `json:"MaximumFrequencyMHz,omitempty"` +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go index ff3b6572..eaf39fa5 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go @@ -29,6 +29,9 @@ var ( // SystemResume is the timeout for resuming a compute system SystemResume time.Duration = defaultTimeout + // SystemSave is the timeout for saving a compute system + SystemSave time.Duration = defaultTimeout + // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. SyscallWatcher time.Duration = defaultTimeout @@ -51,6 +54,7 @@ func init() { SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) + SystemSave = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSAVE", SystemSave) SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go index 7c2a0dc2..e7f114b6 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -26,8 +26,10 @@ import ( //sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? 
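ProcessorLimits above exposes per-VM scheduler knobs: a CPU cap, a relative weight, a guaranteed reservation, and a frequency ceiling. A sketch of a limits payload; this file does not document the value ranges, so the numbers are purely illustrative:

package main

import (
	"encoding/json"
	"fmt"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

func main() {
	limits := hcsschema.ProcessorLimits{
		Weight:              100,  // illustrative relative priority
		MaximumFrequencyMHz: 2400, // illustrative frequency cap
	}
	b, _ := json.Marshal(limits)
	fmt.Println(string(b))
}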
//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? //sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? +//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? //sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? //sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? +//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? //sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? //sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? @@ -60,7 +62,7 @@ type HcsCallback syscall.Handle type HcsProcessInformation struct { // ProcessId is the pid of the created process. ProcessId uint32 - reserved uint32 + reserved uint32 //nolint:structcheck // StdInput is the handle associated with the stdin of the process. StdInput syscall.Handle // StdOutput is the handle associated with the stdout of the process. @@ -337,6 +339,27 @@ func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, confi }) } +func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyServiceSettings(settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback") defer span.End() @@ -563,3 +586,25 @@ func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallba return hcsUnregisterProcessCallback(callbackHandle) }) } + +func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSaveComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go 
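HcsModifyServiceSettings and HcsSaveComputeSystem above follow the package's established pattern: start a span, run the syscall under the watcher timeout, and convert any result string out of CoTaskMem. A sketch driving the new service-settings entry point with the CPU-group schema types (internal packages, so this only compiles inside the hcsshim module; the GUID is illustrative, and whether the HCS accepts this exact payload depends on the host build):

//go:build windows

package main

import (
	"context"
	"encoding/json"
	"log"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
	"github.com/Microsoft/hcsshim/internal/vmcompute"
)

func main() {
	req := hcsschema.ModificationRequest{
		PropertyType: hcsschema.PTCPUGroup,
		Settings: hcsschema.HostProcessorModificationRequest{
			Operation:        hcsschema.DeleteGroup,
			OperationDetails: hcsschema.DeleteGroupOperation{GroupId: "f2e63325-2f22-4dd2-9b64-883e39c91c57"}, // illustrative
		},
	}
	b, err := json.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := vmcompute.HcsModifyServiceSettings(context.Background(), string(b)); err != nil {
		log.Fatal(err)
	}
}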
b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go index 0f2a69f6..cae55058 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -50,8 +50,10 @@ var ( procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") @@ -314,6 +316,29 @@ func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, res return } +func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyServiceSettings(_p0, result) +} + +func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyServiceSettings.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { return @@ -342,6 +367,29 @@ func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { return } +func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSaveComputeSystem(computeSystem, _p0, result) +} + +func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsSaveComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(processParameters) diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go index dcb91926..ff81ac2c 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ 
-1,28 +1,23 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // ActivateLayer will find the layer with the given id and mount its filesystem. // For a read/write layer, the mounted filesystem will appear as a volume on the // host, while a read-only layer is generally expected to be a no-op. // An activated layer must later be deactivated via DeactivateLayer. -func ActivateLayer(path string) (err error) { +func ActivateLayer(ctx context.Context, path string) (err error) { title := "hcsshim::ActivateLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) err = activateLayer(&stdDriverInfo, path) if err != nil { diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go index 5784241d..3ec708d1 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go @@ -1,6 +1,7 @@ package wclayer import ( + "context" "errors" "os" "path/filepath" @@ -8,10 +9,16 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" + "go.opencensus.io/trace" ) type baseLayerWriter struct { + ctx context.Context + s *trace.Span + root *os.File f *os.File bw *winio.BackupFileWriter @@ -31,7 +38,7 @@ type dirInfo struct { func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { for i := range dis { di := &dis[len(dis)-i-1] // reverse order: process child directories first - f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_OPEN, safefile.FILE_DIRECTORY_FILE) + f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT) if err != nil { return err } @@ -41,6 +48,7 @@ func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { if err != nil { return err } + } return nil } @@ -86,14 +94,12 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e extraFlags := uint32(0) if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - extraFlags |= safefile.FILE_DIRECTORY_FILE - if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { - w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) - } + extraFlags |= winapi.FILE_DIRECTORY_FILE + w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) } mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) - f, err = safefile.OpenRelative(name,
w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags) if err != nil { return hcserror.New(err, "Failed to safefile.OpenRelative", name) } @@ -136,12 +142,15 @@ func (w *baseLayerWriter) Write(b []byte) (int, error) { return n, err } -func (w *baseLayerWriter) Close() error { +func (w *baseLayerWriter) Close() (err error) { + defer w.s.End() + defer func() { oc.SetSpanStatus(w.s, err) }() defer func() { w.root.Close() w.root = nil }() - err := w.closeCurrentFile() + + err = w.closeCurrentFile() if err != nil { return err } @@ -153,7 +162,7 @@ return err } - err = ProcessBaseLayer(w.root.Name()) + err = ProcessBaseLayer(w.ctx, w.root.Name()) if err != nil { return err } @@ -163,7 +172,7 @@ if err != nil { return err } - err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM")) + err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM")) if err != nil { return err } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go index be2bc3fd..ffee31ab 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -1,27 +1,23 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // CreateLayer creates a new, empty, read-only layer on the filesystem based on // the parent layer provided. -func CreateLayer(path, parent string) (err error) { +func CreateLayer(ctx context.Context, path, parent string) (err error) { title := "hcsshim::CreateLayer" - fields := logrus.Fields{ - "parent": parent, - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parent", parent)) err = createLayer(&stdDriverInfo, path, parent) if err != nil { diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go index 7e335128..5a3809ae 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -1,31 +1,27 @@ package wclayer import ( + "context" + "strings" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // CreateScratchLayer creates and populates a new read-write layer for use by a container. -// This requires both the id of the direct parent layer, as well as the full list -// of paths to all parent layers up to the base (and including the direct parent -// whose id was provided).
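Every wclayer entry point in this patch gets the same mechanical conversion seen in CreateLayer above: logrus field logging is replaced by an OpenCensus span named after the API, the inputs become span attributes, and a deferred closure copies the named error return onto the span. A condensed, self-contained sketch of that shape; setSpanStatus stands in for the internal oc.SetSpanStatus helper, and the function body is a placeholder:

package main

import (
	"context"
	"errors"
	"fmt"

	"go.opencensus.io/trace"
)

// setSpanStatus maps a Go error onto the span status, roughly what the
// internal oc.SetSpanStatus helper does for these functions.
func setSpanStatus(span *trace.Span, err error) {
	if err != nil {
		span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
	}
}

// createLayer shows the instrumentation shape: start the span, record the
// inputs, and let the deferred closure observe the named error return.
func createLayer(ctx context.Context, path, parent string) (err error) {
	ctx, span := trace.StartSpan(ctx, "hcsshim::CreateLayer") // ctx kept for callees
	defer span.End()
	defer func() { setSpanStatus(span, err) }()
	span.AddAttributes(
		trace.StringAttribute("path", path),
		trace.StringAttribute("parent", parent))

	_ = ctx
	return errors.New("stub: real syscall goes here")
}

func main() {
	fmt.Println(createLayer(context.Background(), `C:\layers\new`, `C:\layers\base`))
}

The defer ordering matters: span.End() is registered first so it runs last, after the error status has already been recorded.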
-func CreateScratchLayer(path string, parentLayerPaths []string) (err error) { +// This requires the full list of paths to all parent layers up to the base +func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { title := "hcsshim::CreateScratchLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) if err != nil { return err } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go index 2dd5d571..d5bf2f5b 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go @@ -1,25 +1,20 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // DeactivateLayer will dismount a layer that was mounted via ActivateLayer. -func DeactivateLayer(path string) (err error) { +func DeactivateLayer(ctx context.Context, path string) (err error) { title := "hcsshim::DeactivateLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) err = deactivateLayer(&stdDriverInfo, path) if err != nil { diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go index 4da690c2..787054e7 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -1,26 +1,21 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // DestroyLayer will remove the on-disk files representing the layer with the given // path, including that layer's containing folder, if any. 
-func DestroyLayer(path string) (err error) { +func DestroyLayer(ctx context.Context, path string) (err error) { title := "hcsshim::DestroyLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) err = destroyLayer(&stdDriverInfo, path) if err != nil { diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go index b3b431e3..93f27da8 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -1,32 +1,27 @@ package wclayer import ( + "context" "os" "path/filepath" "syscall" "unsafe" "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/osversion" - "github.com/sirupsen/logrus" + "go.opencensus.io/trace" ) // ExpandScratchSize expands the size of a layer to at least size bytes. -func ExpandScratchSize(path string, size uint64) (err error) { +func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { title := "hcsshim::ExpandScratchSize" - fields := logrus.Fields{ - "path": path, - "size": size, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.Int64Attribute("size", int64(size))) err = expandSandboxSize(&stdDriverInfo, path, size) if err != nil { @@ -36,7 +31,7 @@ func ExpandScratchSize(path string, size uint64) (err error) { // Manually expand the volume now in order to work around bugs in 19H1 and // prerelease versions of Vb. Remove once this is fixed in Windows. if build := osversion.Get().Build; build >= osversion.V19H1 && build < 19020 { - err = expandSandboxVolume(path) + err = expandSandboxVolume(ctx, path) if err != nil { return err } @@ -84,7 +79,7 @@ func attachVhd(path string) (syscall.Handle, error) { return handle, nil } -func expandSandboxVolume(path string) error { +func expandSandboxVolume(ctx context.Context, path string) error { // Mount the sandbox VHD temporarily. vhdPath := filepath.Join(path, "sandbox.vhdx") vhd, err := attachVhd(vhdPath) @@ -94,7 +89,7 @@ func expandSandboxVolume(path string) error { defer syscall.Close(vhd) // Open the volume. 
- volumePath, err := GetLayerMountPath(path) + volumePath, err := GetLayerMountPath(ctx, path) if err != nil { return err } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go index 0425b339..09f0de1a 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -1,12 +1,15 @@ package wclayer import ( + "context" "io/ioutil" "os" + "strings" "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // ExportLayer will create a folder at exportFolderPath and fill that folder with @@ -14,24 +17,18 @@ import ( // format includes any metadata required for later importing the layer (using // ImportLayer), and requires the full list of parent layer paths in order to // perform the export. -func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) (err error) { +func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { title := "hcsshim::ExportLayer" - fields := logrus.Fields{ - "path": path, - "exportFolderPath": exportFolderPath, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("exportFolderPath", exportFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) if err != nil { return err } @@ -52,25 +49,46 @@ type LayerReader interface { // NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. // The caller must have taken the SeBackupPrivilege privilege // to call this and any methods on the resulting LayerReader. 
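The NewLayerReader change that follows shows a second recurring pattern in this patch: the constructor starts a span and stages state (here a temporary export directory), and on success hands both to the returned wrapper, whose Close ends the span; the constructor only ends the span itself when it fails before returning. A sketch of that ownership handoff, assuming nothing beyond OpenCensus and the standard library (os.MkdirTemp stands in for the vendored ioutil.TempDir call):

package main

import (
	"context"
	"fmt"
	"os"

	"go.opencensus.io/trace"
)

// tempDirReader mimics legacyLayerReaderWrapper: it owns a span and a
// scratch directory for its whole lifetime.
type tempDirReader struct {
	s    *trace.Span
	root string
}

func newTempDirReader(ctx context.Context) (_ *tempDirReader, err error) {
	_, span := trace.StartSpan(ctx, "example::NewLayerReader")
	defer func() {
		if err != nil {
			span.End() // constructor failed, so nobody else will end it
		}
	}()
	root, err := os.MkdirTemp("", "hcs")
	if err != nil {
		return nil, err
	}
	// Success: span ownership transfers to the wrapper.
	return &tempDirReader{s: span, root: root}, nil
}

func (r *tempDirReader) Close() error {
	defer r.s.End() // the span covers construction through Close
	return os.RemoveAll(r.root)
}

func main() {
	r, err := newTempDirReader(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("staged in", r.root)
	fmt.Println("close:", r.Close())
}

The vendored wrapper additionally records the Close error on the span via oc.SetSpanStatus before ending it.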
-func NewLayerReader(path string, parentLayerPaths []string) (LayerReader, error) { +func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { + ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + exportPath, err := ioutil.TempDir("", "hcs") if err != nil { return nil, err } - err = ExportLayer(path, exportPath, parentLayerPaths) + err = ExportLayer(ctx, path, exportPath, parentLayerPaths) if err != nil { os.RemoveAll(exportPath) return nil, err } - return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil + return &legacyLayerReaderWrapper{ + ctx: ctx, + s: span, + legacyLayerReader: newLegacyLayerReader(exportPath), + }, nil } type legacyLayerReaderWrapper struct { + ctx context.Context + s *trace.Span + *legacyLayerReader } -func (r *legacyLayerReaderWrapper) Close() error { - err := r.legacyLayerReader.Close() +func (r *legacyLayerReaderWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() + + err = r.legacyLayerReader.Close() os.RemoveAll(r.root) return err } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go index d60b6ed5..4d22d0ec 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -1,36 +1,30 @@ package wclayer import ( + "context" "syscall" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // GetLayerMountPath will look for a mounted layer with the given path and return // the path at which that layer can be accessed. This path may be a volume path // if the layer is a mounted read-write layer, otherwise it is expected to be the // folder path at which the layer is stored. -func GetLayerMountPath(path string) (_ string, err error) { +func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { title := "hcsshim::GetLayerMountPath" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() - - var mountPathLength uintptr - mountPathLength = 0 + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + var mountPathLength uintptr = 0 // Call the procedure itself. 
- logrus.WithFields(fields).Debug("Calling proc (1)") + log.G(ctx).Debug("Calling proc (1)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) if err != nil { return "", hcserror.New(err, title+" - failed", "(first call)") @@ -44,13 +38,13 @@ func GetLayerMountPath(path string) (_ string, err error) { mountPathp[0] = 0 // Call the procedure again - logrus.WithFields(fields).Debug("Calling proc (2)") + log.G(ctx).Debug("Calling proc (2)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) if err != nil { return "", hcserror.New(err, title+" - failed", "(second call)") } mountPath := syscall.UTF16ToString(mountPathp[0:]) - fields["mountPath"] = mountPath + span.AddAttributes(trace.StringAttribute("mountPath", mountPath)) return mountPath, nil } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go index dbd83ef2..bcc8fbd4 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -1,29 +1,29 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // GetSharedBaseImages will enumerate the images stored in the common central // image store and return descriptive info about those images for the purpose // of registering them with the graphdriver, graph, and tagstore. -func GetSharedBaseImages() (imageData string, err error) { +func GetSharedBaseImages(ctx context.Context) (_ string, err error) { title := "hcsshim::GetSharedBaseImages" - logrus.Debug(title) - defer func() { - if err != nil { - logrus.WithError(err).Error(err) - } else { - logrus.WithField("imageData", imageData).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() var buffer *uint16 err = getBaseImages(&buffer) if err != nil { return "", hcserror.New(err, title+" - failed", "") } - return interop.ConvertAndFreeCoTaskMemString(buffer), nil + imageData := interop.ConvertAndFreeCoTaskMemString(buffer) + span.AddAttributes(trace.StringAttribute("imageData", imageData)) + return imageData, nil } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go index 05735df6..3eaca278 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -1,26 +1,22 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // GrantVmAccess adds access to a file for a given VM -func GrantVmAccess(vmid string, filepath string) (err error) { +func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { title := "hcsshim::GrantVmAccess" - fields := logrus.Fields{ - "vm-id": vmid, - "path": filepath, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - 
fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("vm-id", vmid), + trace.StringAttribute("path", filepath)) err = grantVmAccess(vmid, filepath) if err != nil { diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go index 76a804f2..b3c150d6 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -1,38 +1,35 @@ package wclayer import ( + "context" "io/ioutil" "os" "path/filepath" + "strings" "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/safefile" - "github.com/sirupsen/logrus" + "go.opencensus.io/trace" ) // ImportLayer will take the contents of the folder at importFolderPath and import // that into a layer with the id layerId. Note that in order to correctly populate // the layer and interpret the transport format, all parent layers must already // be present on the system at the paths provided in parentLayerPaths. -func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) (err error) { +func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) { title := "hcsshim::ImportLayer" - fields := logrus.Fields{ - "path": path, - "importFolderPath": importFolderPath, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("importFolderPath", importFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) if err != nil { return err } @@ -60,20 +57,26 @@ type LayerWriter interface { } type legacyLayerWriterWrapper struct { + ctx context.Context + s *trace.Span + *legacyLayerWriter path string parentLayerPaths []string } -func (r *legacyLayerWriterWrapper) Close() error { +func (r *legacyLayerWriterWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() defer os.RemoveAll(r.root.Name()) defer r.legacyLayerWriter.CloseRoots() - err := r.legacyLayerWriter.Close() + + err = r.legacyLayerWriter.Close() if err != nil { return err } - if err = ImportLayer(r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { + if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { return err } for _, name := range r.Tombstones { @@ -90,13 +93,26 @@ func (r *legacyLayerWriterWrapper) Close() error { return err } } + + // The reapplyDirectoryTimes must be called AFTER we are done with Tombstone + // deletion and hard link creation.
This is because Tombstone deletion and hard link + // creation updates the directory last write timestamps so that will change the + // timestamps added by the `Add` call. Some container applications depend on the + // correctness of these timestamps and so we should change the timestamps back to + // the original value (i.e the value provided in the Add call) after this + // processing is done. + err = reapplyDirectoryTimes(r.destRoot, r.changedDi) + if err != nil { + return err + } + // Prepare the utility VM for use if one is present in the layer. if r.HasUtilityVM { err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) if err != nil { return err } - err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM")) + err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM")) if err != nil { return err } @@ -107,7 +123,18 @@ func (r *legacyLayerWriterWrapper) Close() error { // NewLayerWriter returns a new layer writer for creating a layer on disk. // The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges // to call this and any methods on the resulting LayerWriter. -func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) { +func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { + ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + if len(parentLayerPaths) == 0 { // This is a base layer. It gets imported differently. f, err := safefile.OpenRoot(path) @@ -115,6 +142,8 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) return nil, err } return &baseLayerWriter{ + ctx: ctx, + s: span, root: f, }, nil } @@ -128,6 +157,8 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) return nil, err } return &legacyLayerWriterWrapper{ + ctx: ctx, + s: span, legacyLayerWriter: w, path: importPath, parentLayerPaths: parentLayerPaths, diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go index 258167a5..c6999973 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -1,26 +1,21 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // LayerExists will return true if a layer with the given id exists and is known // to the system. 
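The comment above captures a general extraction problem, independent of the Windows APIs involved: deleting tombstones and creating hard links inside a directory bumps that directory's last-write time, so the timestamps recorded when each directory was added must be reapplied only after all of that is done. A portable illustration of the idea using os.Chtimes; the vendored reapplyDirectoryTimes works on NT handles through safefile/winio rather than on paths:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// dirTime pairs a directory (relative to the layer root) with the last-write
// time recorded when it was added, in creation order (parents first).
type dirTime struct {
	path  string
	mtime time.Time
}

// reapplyDirTimes restores the recorded times in reverse order, mirroring
// the vendored reapplyDirectoryTimes, which processes child directories first.
func reapplyDirTimes(root string, dis []dirTime) error {
	for i := len(dis) - 1; i >= 0; i-- {
		d := dis[i]
		if err := os.Chtimes(filepath.Join(root, d.path), d.mtime, d.mtime); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	root, _ := os.MkdirTemp("", "layer")
	defer os.RemoveAll(root)
	_ = os.MkdirAll(filepath.Join(root, "a", "b"), 0o755)
	when := time.Date(2021, 6, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(reapplyDirTimes(root, []dirTime{{"a", when}, {filepath.Join("a", "b"), when}}))
}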
-func LayerExists(path string) (_ bool, err error) { +func LayerExists(ctx context.Context, path string) (_ bool, err error) { title := "hcsshim::LayerExists" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) // Call the procedure itself. var exists uint32 @@ -28,6 +23,6 @@ func LayerExists(path string) (_ bool, err error) { if err != nil { return false, hcserror.New(err, title+" - failed", "") } - fields["layer-exists"] = exists != 0 + span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) return exists != 0, nil } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go index 443596fb..0ce34a30 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go @@ -1,13 +1,22 @@ package wclayer import ( + "context" "path/filepath" "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // LayerID returns the layer ID of a layer on disk. -func LayerID(path string) (guid.GUID, error) { +func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { + title := "hcsshim::LayerID" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + _, file := filepath.Split(path) - return NameToGuid(file) + return NameToGuid(ctx, file) } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go index 06671309..1ec893c6 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go @@ -4,6 +4,7 @@ package wclayer // functionality. import ( + "context" "syscall" "github.com/Microsoft/go-winio/pkg/guid" @@ -68,12 +69,12 @@ type WC_LAYER_DESCRIPTOR struct { Pathp *uint16 } -func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { +func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { // Array of descriptors that gets constructed. 
var layers []WC_LAYER_DESCRIPTOR for i := 0; i < len(parentLayerPaths); i++ { - g, err := LayerID(parentLayerPaths[i]) + g, err := LayerID(ctx, parentLayerPaths[i]) if err != nil { logrus.WithError(err).Debug("Failed to convert name to guid") return nil, err diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index b8ea5d26..83ba72cf 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -15,6 +15,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/longpath" "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" ) var errorIterationCanceled = errors.New("") @@ -341,7 +342,7 @@ type legacyLayerWriter struct { backupWriter *winio.BackupFileWriter Tombstones []string HasUtilityVM bool - uvmDi []dirInfo + changedDi []dirInfo addedFiles map[string]bool PendingLinks []pendingLink pendingDirs []pendingDir @@ -389,7 +390,7 @@ func (w *legacyLayerWriter) CloseRoots() { w.destRoot = nil } for i := range w.parentRoots { - w.parentRoots[i].Close() + _ = w.parentRoots[i].Close() } w.parentRoots = nil } @@ -472,8 +473,8 @@ func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool srcRoot, syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, syscall.FILE_SHARE_READ, - safefile.FILE_OPEN, - safefile.FILE_OPEN_REPARSE_POINT) + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) if err != nil { return nil, err } @@ -488,14 +489,14 @@ func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool extraFlags := uint32(0) if isDir { - extraFlags |= safefile.FILE_DIRECTORY_FILE + extraFlags |= winapi.FILE_DIRECTORY_FILE } dest, err := safefile.OpenRelative( subPath, destRoot, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, syscall.FILE_SHARE_READ, - safefile.FILE_CREATE, + winapi.FILE_CREATE, extraFlags) if err != nil { return nil, err @@ -555,7 +556,7 @@ func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles if err != nil { return err } - if isDir && !isReparsePoint { + if isDir { di = append(di, dirInfo{path: relPath, fileInfo: *fi}) } } else { @@ -583,6 +584,10 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro return w.initUtilityVM() } + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo}) + } + name = filepath.Clean(name) if hasPathPrefix(name, utilityVMPath) { if !w.HasUtilityVM { @@ -591,7 +596,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { return errors.New("invalid UtilityVM layer") } - createDisposition := uint32(safefile.FILE_OPEN) + createDisposition := uint32(winapi.FILE_OPEN) if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { st, err := safefile.LstatRelative(name, w.destRoot) if err != nil && !os.IsNotExist(err) { @@ -612,16 +617,13 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro return err } } - if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { - w.uvmDi = append(w.uvmDi, dirInfo{path: name, fileInfo: *fileInfo}) - } } else { // Overwrite 
any existing hard link. err := safefile.RemoveRelative(name, w.destRoot) if err != nil && !os.IsNotExist(err) { return err } - createDisposition = safefile.FILE_CREATE + createDisposition = winapi.FILE_CREATE } f, err := safefile.OpenRelative( @@ -630,7 +632,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, syscall.FILE_SHARE_READ, createDisposition, - safefile.FILE_OPEN_REPARSE_POINT, + winapi.FILE_OPEN_REPARSE_POINT, ) if err != nil { return err @@ -638,7 +640,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro defer func() { if f != nil { f.Close() - safefile.RemoveRelative(name, w.destRoot) + _ = safefile.RemoveRelative(name, w.destRoot) } }() @@ -667,14 +669,14 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro w.currentIsDir = true } - f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, 0) + f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0) if err != nil { return err } defer func() { if f != nil { f.Close() - safefile.RemoveRelative(fname, w.root) + _ = safefile.RemoveRelative(fname, w.root) } }() @@ -805,11 +807,5 @@ func (w *legacyLayerWriter) Close() error { return err } } - if w.HasUtilityVM { - err := reapplyDirectoryTimes(w.destRoot, w.uvmDi) - if err != nil { - return err - } - } return nil } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go index a259c1b8..bcf39c6b 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go @@ -1,34 +1,29 @@ package wclayer import ( + "context" + "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // NameToGuid converts the given string into a GUID using the algorithm in the // Host Compute Service, ensuring GUIDs generated with the same string are common // across all clients. 
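The NameToGuid conversion that follows is worth pausing on: its contract is determinism, so every client that feeds the same layer name to the Host Compute Service gets the same GUID back, and layer folders can be identified without any shared state. The real mapping lives behind the nameToGuid syscall; the SHA-1 truncation below only demonstrates the determinism property and is not the HCS algorithm:

package main

import (
	"crypto/sha1"
	"fmt"
)

// nameToDeterministicID derives a stable 16-byte identifier from a name.
// Illustrative only: HCS computes its GUIDs inside vmcompute, not like this.
func nameToDeterministicID(name string) [16]byte {
	var id [16]byte
	sum := sha1.Sum([]byte(name))
	copy(id[:], sum[:16])
	return id
}

func main() {
	a := nameToDeterministicID("base-layer")
	b := nameToDeterministicID("base-layer")
	fmt.Println(a == b) // true: same name, same ID, in any process on any machine
}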
-func NameToGuid(name string) (id guid.GUID, err error) { +func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) { title := "hcsshim::NameToGuid" - fields := logrus.Fields{ - "name": name, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("name", name)) + var id guid.GUID err = nameToGuid(name, &id) if err != nil { - err = hcserror.New(err, title+" - failed", "") - return + return guid.GUID{}, hcserror.New(err, title+" - failed", "") } - fields["guid"] = id.String() - return + span.AddAttributes(trace.StringAttribute("guid", id.String())) + return id, nil } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go index 2b65b018..55f7730d 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go @@ -1,10 +1,13 @@ package wclayer import ( + "context" + "strings" "sync" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) var prepareLayerLock sync.Mutex @@ -14,23 +17,17 @@ var prepareLayerLock sync.Mutex // parent layers, and is necessary in order to view or interact with the layer // as an actual filesystem (reading and writing files, creating directories, etc). // Disabling the filter must be done via UnprepareLayer. -func PrepareLayer(path string, parentLayerPaths []string) (err error) { +func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { title := "hcsshim::PrepareLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) if err != nil { return err } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go index 884207c3..30bcdff5 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go @@ -1,23 +1,41 @@ package wclayer -import "os" +import ( + "context" + "os" + + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) // ProcessBaseLayer post-processes a base layer that has had its files extracted. // The files should have been extracted to \Files. 
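ProcessBaseLayer and ProcessUtilityVMImage, updated next, keep their error contract while gaining spans: a failure surfaces as *os.PathError whose Op is now the span title, so callers still get the operation name, the layer path, and the underlying cause in one value. A small sketch of producing and consuming that shape; the inner error is a stand-in for processBaseImage failing:

package main

import (
	"errors"
	"fmt"
	"os"
)

// processLayer wraps a low-level failure the way ProcessBaseLayer does.
func processLayer(path string) error {
	if err := errors.New("access is denied"); err != nil { // stand-in failure
		return &os.PathError{Op: "hcsshim::ProcessBaseLayer", Path: path, Err: err}
	}
	return nil
}

func main() {
	err := processLayer(`C:\layers\base`)
	var pe *os.PathError
	if errors.As(err, &pe) {
		fmt.Println(pe.Op, pe.Path, pe.Err) // all three survive the wrapping
	}
}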
-func ProcessBaseLayer(path string) error { - err := processBaseImage(path) +func ProcessBaseLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessBaseLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processBaseImage(path) if err != nil { - return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err} + return &os.PathError{Op: title, Path: path, Err: err} } return nil } // ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. // The files should have been extracted to \Files. -func ProcessUtilityVMImage(path string) error { - err := processUtilityImage(path) +func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessUtilityVMImage" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processUtilityImage(path) if err != nil { - return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err} + return &os.PathError{Op: title, Path: path, Err: err} } return nil } diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go index bccd4596..79fb9867 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -1,26 +1,21 @@ package wclayer import ( + "context" + "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/sirupsen/logrus" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" ) // UnprepareLayer disables the filesystem filter for the read-write layer with // the given id. -func UnprepareLayer(path string) (err error) { +func UnprepareLayer(ctx context.Context, path string) (err error) { title := "hcsshim::UnprepareLayer" - fields := logrus.Fields{ - "path": path, - } - logrus.WithFields(fields).Debug(title) - defer func() { - if err != nil { - fields[logrus.ErrorKey] = err - logrus.WithFields(fields).Error(err) - } else { - logrus.WithFields(fields).Debug(title + " - succeeded") - } - }() + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) err = unprepareLayer(&stdDriverInfo, path) if err != nil { diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go index dc40bf51..9b1e06d5 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go @@ -1,3 +1,6 @@ +// Package wclayer provides bindings to HCS's legacy layer management API and +// provides a higher level interface around these calls for container layer +// management. 
package wclayer import "github.com/Microsoft/go-winio/pkg/guid" diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go new file mode 100644 index 00000000..df28ea24 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go @@ -0,0 +1,13 @@ +package winapi + +import "github.com/Microsoft/go-winio/pkg/guid" + +//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA +//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA +//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW +//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW + +type DevPropKey struct { + Fmtid guid.GUID + Pid uint32 +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go new file mode 100644 index 00000000..4e80ef68 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go @@ -0,0 +1,15 @@ +package winapi + +import "syscall" + +//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError + +const ( + STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B + ERROR_NO_MORE_ITEMS = 0x103 + ERROR_MORE_DATA syscall.Errno = 234 +) + +func NTSuccess(status uint32) bool { + return status == 0 +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go new file mode 100644 index 00000000..7ce52afd --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go @@ -0,0 +1,110 @@ +package winapi + +//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile +//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile + +//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject +//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject + +const ( + FileLinkInformationClass = 11 + FileDispositionInformationExClass = 64 + + FILE_READ_ATTRIBUTES = 0x0080 + FILE_WRITE_ATTRIBUTES = 0x0100 + DELETE = 0x10000 + + FILE_OPEN = 1 + FILE_CREATE = 2 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_DIRECTORY_FILE = 0x00000001 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + + FILE_DISPOSITION_DELETE = 0x00000001 + + OBJ_DONT_REPARSE = 0x1000 + + STATUS_MORE_ENTRIES = 0x105 + STATUS_NO_MORE_ENTRIES = 0x8000001a +) + +// Select entries from FILE_INFO_BY_HANDLE_CLASS. 
+// +// C declaration: +// typedef enum _FILE_INFO_BY_HANDLE_CLASS { +// FileBasicInfo, +// FileStandardInfo, +// FileNameInfo, +// FileRenameInfo, +// FileDispositionInfo, +// FileAllocationInfo, +// FileEndOfFileInfo, +// FileStreamInfo, +// FileCompressionInfo, +// FileAttributeTagInfo, +// FileIdBothDirectoryInfo, +// FileIdBothDirectoryRestartInfo, +// FileIoPriorityHintInfo, +// FileRemoteProtocolInfo, +// FileFullDirectoryInfo, +// FileFullDirectoryRestartInfo, +// FileStorageInfo, +// FileAlignmentInfo, +// FileIdInfo, +// FileIdExtdDirectoryInfo, +// FileIdExtdDirectoryRestartInfo, +// FileDispositionInfoEx, +// FileRenameInfoEx, +// FileCaseSensitiveInfo, +// FileNormalizedNameInfo, +// MaximumFileInfoByHandleClass +// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class +const ( + FileIdInfo = 18 +) + +type FileDispositionInformationEx struct { + Flags uintptr +} + +type IOStatusBlock struct { + Status, Information uintptr +} + +type ObjectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *UnicodeString + Attributes uintptr + SecurityDescriptor uintptr + SecurityQoS uintptr +} + +type ObjectDirectoryInformation struct { + Name UnicodeString + TypeName UnicodeString +} + +type FileLinkInformation struct { + ReplaceIfExists bool + RootDirectory uintptr + FileNameLength uint32 + FileName [1]uint16 +} + +// C declaration: +// typedef struct _FILE_ID_INFO { +// ULONGLONG VolumeSerialNumber; +// FILE_ID_128 FileId; +// } FILE_ID_INFO, *PFILE_ID_INFO; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info +type FILE_ID_INFO struct { + VolumeSerialNumber uint64 + FileID [16]byte +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go new file mode 100644 index 00000000..4e609cbf --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go @@ -0,0 +1,3 @@ +package winapi + +//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go new file mode 100644 index 00000000..ba12b1ad --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go @@ -0,0 +1,215 @@ +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// Messages that can be received from an assigned io completion port. +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +const ( + JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 + JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 + JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 + JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 + JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 + JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 + JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 + JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 + JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 + JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 +) + +// Access rights for creating or opening job objects. 
+// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights +const JOB_OBJECT_ALL_ACCESS = 0x1F001F + +// IO limit flags +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 + +const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +const ( + JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota + JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY + JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE +) + +// JobObjectInformationClass values. Used for a call to QueryInformationJobObject +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject +const ( + JobObjectBasicAccountingInformation uint32 = 1 + JobObjectBasicProcessIdList uint32 = 3 + JobObjectBasicAndIoAccountingInformation uint32 = 8 + JobObjectLimitViolationInformation uint32 = 13 + JobObjectMemoryUsageInformation uint32 = 28 + JobObjectNotificationLimitInformation2 uint32 = 33 + JobObjectIoAttribution uint32 = 42 +) + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { + ControlFlags uint32 + Value uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { + MaxIops int64 + MaxBandwidth int64 + ReservationIops int64 + BaseIOSize uint32 + VolumeName string + ControlFlags uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list +type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { + NumberOfAssignedProcesses uint32 + NumberOfProcessIdsInList uint32 + ProcessIdList [1]uintptr +} + +// AllPids returns all the process Ids in the job object. 
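The AllPids accessor defined next handles a common Win32 shape: the struct declares a one-element trailing array, but the kernel actually writes NumberOfProcessIdsInList entries past it into the caller's buffer. The vendored code re-slices that memory through a giant fixed-size array type; the sketch below performs the same trick with the newer unsafe.Slice helper, over a hand-built buffer standing in for a real QueryInformationJobObject result:

package main

import (
	"fmt"
	"unsafe"
)

// pidList mirrors the layout of JOBOBJECT_BASIC_PROCESS_ID_LIST: a header
// followed by a trailing array that is really sized at query time.
type pidList struct {
	assigned uint32
	inList   uint32
	pids     [1]uintptr
}

func main() {
	// A generously sized, properly aligned buffer standing in for the one a
	// QueryInformationJobObject call would fill.
	buf := make([]uintptr, 8)
	p := (*pidList)(unsafe.Pointer(&buf[0]))
	p.assigned, p.inList = 3, 3

	// The AllPids trick: view the trailing array as a slice of inList entries.
	pids := unsafe.Slice(&p.pids[0], p.inList)
	pids[0], pids[1], pids[2] = 100, 200, 300
	fmt.Println(pids) // [100 200 300]
}

unsafe.Slice needs Go 1.17 or later; the vendored (*[(1 << 27) - 1]uintptr) conversion is the pre-1.17 spelling of the same view.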
+func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { + return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList] +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information +type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { + TotalUserTime int64 + TotalKernelTime int64 + ThisPeriodTotalUserTime int64 + ThisPeriodTotalKernelTime int64 + TotalPageFaultCount uint32 + TotalProcesses uint32 + ActiveProcesses uint32 + TotalTerminateProcesses uint32 +} + +//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information +type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { + BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION + IoInfo windows.IO_COUNTERS +} + +// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { +// ULONG64 JobMemory; +// ULONG64 PeakJobMemoryUsed; +// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; +// +type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { + JobMemory uint64 + PeakJobMemoryUsed uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { +// ULONG_PTR IoCount; +// ULONGLONG TotalNonOverlappedQueueTime; +// ULONGLONG TotalNonOverlappedServiceTime; +// ULONGLONG TotalSize; +// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; +// +type JOBOBJECT_IO_ATTRIBUTION_STATS struct { + IoCount uintptr + TotalNonOverlappedQueueTime uint64 + TotalNonOverlappedServiceTime uint64 + TotalSize uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { +// ULONG ControlFlags; +// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; +// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; +// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; +// +type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct { + ControlFlags uint32 + ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS + WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { + CompletionKey windows.Handle + CompletionPort windows.Handle +} + +// BOOL IsProcessInJob( +// HANDLE ProcessHandle, +// HANDLE JobHandle, +// PBOOL Result +// ); +// +//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob + +// BOOL QueryInformationJobObject( +// HANDLE hJob, +// JOBOBJECTINFOCLASS JobObjectInformationClass, +// LPVOID lpJobObjectInformation, +// DWORD cbJobObjectInformationLength, +// LPDWORD lpReturnLength +// ); +// +//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject + +// HANDLE OpenJobObjectW( +// DWORD dwDesiredAccess, +// BOOL bInheritHandle, +// LPCWSTR lpName +// ); +// +//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW + +// DWORD SetIoRateControlInformationJobObject( +// HANDLE hJob, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo +// ); +// +//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject + +// DWORD QueryIoRateControlInformationJobObject( +// HANDLE hJob, +// PCWSTR VolumeName, +// 
JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, +// ULONG *InfoBlockCount +// ); +//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject + +// NTSTATUS +// NtOpenJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject + +// NTSTATUS +// NTAPI +// NtCreateJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go new file mode 100644 index 00000000..b6e7cfd4 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go @@ -0,0 +1,30 @@ +package winapi + +// BOOL LogonUserA( +// LPCWSTR lpszUsername, +// LPCWSTR lpszDomain, +// LPCWSTR lpszPassword, +// DWORD dwLogonType, +// DWORD dwLogonProvider, +// PHANDLE phToken +// ); +// +//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW + +// Logon types +const ( + LOGON32_LOGON_INTERACTIVE uint32 = 2 + LOGON32_LOGON_NETWORK uint32 = 3 + LOGON32_LOGON_BATCH uint32 = 4 + LOGON32_LOGON_SERVICE uint32 = 5 + LOGON32_LOGON_UNLOCK uint32 = 7 + LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8 + LOGON32_LOGON_NEW_CREDENTIALS uint32 = 9 +) + +// Logon providers +const ( + LOGON32_PROVIDER_DEFAULT uint32 = 0 + LOGON32_PROVIDER_WINNT40 uint32 = 2 + LOGON32_PROVIDER_WINNT50 uint32 = 3 +) diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go new file mode 100644 index 00000000..83f70406 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go @@ -0,0 +1,27 @@ +package winapi + +// VOID RtlMoveMemory( +// _Out_ VOID UNALIGNED *Destination, +// _In_ const VOID UNALIGNED *Source, +// _In_ SIZE_T Length +// ); +//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory + +//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc +//sys LocalFree(ptr uintptr) = kernel32.LocalFree + +// BOOL QueryWorkingSet( +// HANDLE hProcess, +// PVOID pv, +// DWORD cb +// ); +//sys QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSet + +type PSAPI_WORKING_SET_INFORMATION struct { + NumberOfEntries uintptr + WorkingSetInfo [1]PSAPI_WORKING_SET_BLOCK +} + +type PSAPI_WORKING_SET_BLOCK struct { + Flags uintptr +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go new file mode 100644 index 00000000..f3791002 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go @@ -0,0 +1,3 @@ +package winapi + +//sys SetJobCompartmentId(handle 
windows.Handle, compartmentId uint32) (win32Err error) = iphlpapi.SetJobCompartmentId diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go new file mode 100644 index 00000000..908920e8 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go @@ -0,0 +1,11 @@ +package winapi + +// DWORD SearchPathW( +// LPCWSTR lpPath, +// LPCWSTR lpFileName, +// LPCWSTR lpExtension, +// DWORD nBufferLength, +// LPWSTR lpBuffer, +// LPWSTR *lpFilePart +// ); +//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go new file mode 100644 index 00000000..b8706832 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -0,0 +1,10 @@ +package winapi + +const PROCESS_ALL_ACCESS uint32 = 2097151 + +// DWORD GetProcessImageFileNameW( +// HANDLE hProcess, +// LPWSTR lpImageFileName, +// DWORD nSize +// ); +//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go new file mode 100644 index 00000000..ce79ac2c --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go @@ -0,0 +1,7 @@ +package winapi + +// Get count from all processor groups. 
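// Editor's sketch (not part of the vendored file): a typical use of the
// GetProcessImageFileName binding above. The 260-element buffer mirrors
// MAX_PATH and is an assumption; NT device paths can exceed it, in which
// case the call fails and a larger buffer is needed. windows is
// golang.org/x/sys/windows.
func processImagePath(proc windows.Handle) (string, error) {
	buf := make([]uint16, 260)
	n, err := GetProcessImageFileName(proc, &buf[0], uint32(len(buf)))
	if err != nil {
		return "", err
	}
	return windows.UTF16ToString(buf[:n]), nil
}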
+// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups +const ALL_PROCESSOR_GROUPS = 0xFFFF + +//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go new file mode 100644 index 00000000..327f57d7 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -0,0 +1,52 @@ +package winapi + +import "golang.org/x/sys/windows" + +const SystemProcessInformation = 5 + +const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 + +// __kernel_entry NTSTATUS NtQuerySystemInformation( +// SYSTEM_INFORMATION_CLASS SystemInformationClass, +// PVOID SystemInformation, +// ULONG SystemInformationLength, +// PULONG ReturnLength +// ); +//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation + +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 // ULONG + NumberOfThreads uint32 // ULONG + WorkingSetPrivateSize int64 // LARGE_INTEGER + HardFaultCount uint32 // ULONG + NumberOfThreadsHighWatermark uint32 // ULONG + CycleTime uint64 // ULONGLONG + CreateTime int64 // LARGE_INTEGER + UserTime int64 // LARGE_INTEGER + KernelTime int64 // LARGE_INTEGER + ImageName UnicodeString // UNICODE_STRING + BasePriority int32 // KPRIORITY + UniqueProcessID windows.Handle // HANDLE + InheritedFromUniqueProcessID windows.Handle // HANDLE + HandleCount uint32 // ULONG + SessionID uint32 // ULONG + UniqueProcessKey *uint32 // ULONG_PTR + PeakVirtualSize uintptr // SIZE_T + VirtualSize uintptr // SIZE_T + PageFaultCount uint32 // ULONG + PeakWorkingSetSize uintptr // SIZE_T + WorkingSetSize uintptr // SIZE_T + QuotaPeakPagedPoolUsage uintptr // SIZE_T + QuotaPagedPoolUsage uintptr // SIZE_T + QuotaPeakNonPagedPoolUsage uintptr // SIZE_T + QuotaNonPagedPoolUsage uintptr // SIZE_T + PagefileUsage uintptr // SIZE_T + PeakPagefileUsage uintptr // SIZE_T + PrivatePageCount uintptr // SIZE_T + ReadOperationCount int64 // LARGE_INTEGER + WriteOperationCount int64 // LARGE_INTEGER + OtherOperationCount int64 // LARGE_INTEGER + ReadTransferCount int64 // LARGE_INTEGER + WriteTransferCount int64 // LARGE_INTEGER + OtherTransferCount int64 // LARGE_INTEGER +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go new file mode 100644 index 00000000..4724713e --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go @@ -0,0 +1,12 @@ +package winapi + +// HANDLE CreateRemoteThread( +// HANDLE hProcess, +// LPSECURITY_ATTRIBUTES lpThreadAttributes, +// SIZE_T dwStackSize, +// LPTHREAD_START_ROUTINE lpStartAddress, +// LPVOID lpParameter, +// DWORD dwCreationFlags, +// LPDWORD lpThreadId +// ); +//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go new file mode 100644 index 00000000..db59567d --- /dev/null +++ 
b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -0,0 +1,75 @@ +package winapi + +import ( + "errors" + "reflect" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice +// for easier interop with Go APIs +func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) + hdr.Data = uintptr(unsafe.Pointer(buffer)) + hdr.Cap = bufferLength + hdr.Len = bufferLength + + return +} + +type UnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +//String converts a UnicodeString to a golang string +func (uni UnicodeString) String() string { + // UnicodeString is not guaranteed to be null terminated, therefore + // use the UnicodeString's Length field + return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) +} + +// NewUnicodeString allocates a new UnicodeString and copies `s` into +// the buffer of the new UnicodeString. +func NewUnicodeString(s string) (*UnicodeString, error) { + // Get length of original `s` to use in the UnicodeString since the `buf` + // created later will have an additional trailing null character + length := len(s) + if length > 32767 { + return nil, syscall.ENAMETOOLONG + } + + buf, err := windows.UTF16FromString(s) + if err != nil { + return nil, err + } + uni := &UnicodeString{ + Length: uint16(length * 2), + MaximumLength: uint16(length * 2), + Buffer: &buf[0], + } + return uni, nil +} + +// ConvertStringSetToSlice is a helper function used to convert the contents of +// `buf` into a string slice. `buf` contains a set of null terminated strings +// with an additional null at the end to indicate the end of the set. +func ConvertStringSetToSlice(buf []byte) ([]string, error) { + var results []string + prev := 0 + for i := range buf { + if buf[i] == 0 { + if prev == i { + // found two null characters in a row, return result + return results, nil + } + results = append(results, string(buf[prev:i])) + prev = i + 1 + } + } + return nil, errors.New("string set malformed: missing null terminator at end of buffer") +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go new file mode 100644 index 00000000..ec88c0d2 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -0,0 +1,5 @@ +// Package winapi contains various low-level bindings to Windows APIs. It can +// be thought of as an extension to golang.org/x/sys/windows. +package winapi + +//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go new file mode 100644 index 00000000..2941b0f9 --- /dev/null +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -0,0 +1,371 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package winapi + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. 
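// Editor's example (not part of the vendored file): ConvertStringSetToSlice
// above parses a double-NUL-terminated string set, the layout used by
// REG_MULTI_SZ-style buffers; two NULs in a row end the set. Requires "fmt".
func demoStringSet() {
	ss, err := ConvertStringSetToSlice([]byte("one\x00two\x00\x00"))
	if err != nil {
		panic(err)
	}
	fmt.Println(ss) // prints [one two]
}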
+const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modpsapi = windows.NewLazySystemDLL("psapi.dll") + modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") + procSearchPathW = modkernel32.NewProc("SearchPathW") + procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") + procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") + procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") + procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") + procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") + procLogonUserW = modadvapi32.NewProc("LogonUserW") + procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procQueryWorkingSet = modpsapi.NewProc("QueryWorkingSet") + procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW") + procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") + procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") + procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") + procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") + procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") +) + +func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { + r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + if r0 != 0 { + win32Err = syscall.Errno(r0) + } + return +} + +func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, 
nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + size = uint32(r0) + if size == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) { + r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, 
uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalAlloc(flags uint32, size int) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + ptr = uintptr(r0) + return +} + +func LocalFree(ptr uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procQueryWorkingSet.Addr(), 3, uintptr(handle), uintptr(pv), uintptr(cb)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize)) + size = uint32(r0) + if size == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { + r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + amount = uint32(r0) + return +} + +func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), 
uintptr(uFlags), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(pDeviceID) + if hr != nil { + return + } + return _CMLocateDevNode(pdnDevInst, _p0, uFlags) +} + +func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + status = uint32(r0) + return +} + +func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + +func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) + status = uint32(r0) + return +} + +func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { + var _p0 uint32 + if singleEntry { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if restartScan { + _p1 = 1 + } else { + _p1 = 0 + } + r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func RtlNtStatusToDosError(status uint32) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/layer.go b/registry-library/vendor/github.com/Microsoft/hcsshim/layer.go index 
f60ba550..89161637 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/layer.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/layer.go @@ -1,6 +1,7 @@ package hcsshim import ( + "context" "crypto/sha1" "path/filepath" @@ -13,59 +14,59 @@ func layerPath(info *DriverInfo, id string) string { } func ActivateLayer(info DriverInfo, id string) error { - return wclayer.ActivateLayer(layerPath(&info, id)) + return wclayer.ActivateLayer(context.Background(), layerPath(&info, id)) } func CreateLayer(info DriverInfo, id, parent string) error { - return wclayer.CreateLayer(layerPath(&info, id), parent) + return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent) } // New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) } func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) } func DeactivateLayer(info DriverInfo, id string) error { - return wclayer.DeactivateLayer(layerPath(&info, id)) + return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id)) } func DestroyLayer(info DriverInfo, id string) error { - return wclayer.DestroyLayer(layerPath(&info, id)) + return wclayer.DestroyLayer(context.Background(), layerPath(&info, id)) } // New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. 
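// Editor's sketch (not part of the patch): every wrapper in this file now
// pins context.Background(), so caller deadlines and tracing metadata are
// dropped. A caller that needs cancellation can invoke wclayer directly;
// the 30-second timeout is an assumption, and layerPath/DriverInfo are the
// identifiers this file already defines. Requires "time" alongside the
// imports above.
func activateLayerWithTimeout(info DriverInfo, id string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return wclayer.ActivateLayer(ctx, layerPath(&info, id))
}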
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) } func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) } func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { - return wclayer.ExportLayer(layerPath(&info, layerId), exportFolderPath, parentLayerPaths) + return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths) } func GetLayerMountPath(info DriverInfo, id string) (string, error) { - return wclayer.GetLayerMountPath(layerPath(&info, id)) + return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id)) } func GetSharedBaseImages() (imageData string, err error) { - return wclayer.GetSharedBaseImages() + return wclayer.GetSharedBaseImages(context.Background()) } func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { - return wclayer.ImportLayer(layerPath(&info, layerID), importFolderPath, parentLayerPaths) + return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths) } func LayerExists(info DriverInfo, id string) (bool, error) { - return wclayer.LayerExists(layerPath(&info, id)) + return wclayer.LayerExists(context.Background(), layerPath(&info, id)) } func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { - return wclayer.PrepareLayer(layerPath(&info, layerId), parentLayerPaths) + return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) } func ProcessBaseLayer(path string) error { - return wclayer.ProcessBaseLayer(path) + return wclayer.ProcessBaseLayer(context.Background(), path) } func ProcessUtilityVMImage(path string) error { - return wclayer.ProcessUtilityVMImage(path) + return wclayer.ProcessUtilityVMImage(context.Background(), path) } func UnprepareLayer(info DriverInfo, layerId string) error { - return wclayer.UnprepareLayer(layerPath(&info, layerId)) + return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) } type DriverInfo struct { @@ -76,7 +77,7 @@ type DriverInfo struct { type GUID [16]byte func NameToGuid(name string) (id GUID, err error) { - g, err := wclayer.NameToGuid(name) + g, err := wclayer.NameToGuid(context.Background(), name) return g.ToWindowsArray(), err } @@ -94,13 +95,13 @@ func (g *GUID) ToString() string { type LayerReader = wclayer.LayerReader func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { - return wclayer.NewLayerReader(layerPath(&info, layerID), parentLayerPaths) + return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths) } type LayerWriter = wclayer.LayerWriter func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { - return wclayer.NewLayerWriter(layerPath(&info, layerID), parentLayerPaths) + return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths) } type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go 
b/registry-library/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go index 477fe707..42e58403 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -15,21 +15,6 @@ type OSVersion struct { Build uint16 } -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - // Get gets the operating system version on Windows. // The calling application must be manifested to get the correct version information. func Get() OSVersion { diff --git a/registry-library/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/registry-library/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go index 726d1c8c..e9267b95 100644 --- a/registry-library/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ b/registry-library/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -24,4 +24,15 @@ const ( // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual // channel). V19H1 = 18362 + + // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual + // channel). + V19H2 = 18363 + + // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual + // channel). + V20H1 = 19041 + + // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). + V20H2 = 19042 ) diff --git a/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go index c7884e8e..6d2d4177 100644 --- a/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go +++ b/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go @@ -9,6 +9,7 @@ import ( proto "github.com/gogo/protobuf/proto" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" ) @@ -22,19 +23,21 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
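// Editor's sketch (not part of the patch): the V19H2/V20H1/V20H2 constants
// added to windowsbuilds.go above are typically consumed as build gates.
// "Foo" is a placeholder feature name, and osversion.Build() is assumed to
// be the package's existing accessor for the host build number.
func hostSupportsFoo() bool {
	return osversion.Build() >= osversion.V20H1
}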
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Metrics struct { - Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` - Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` - CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` - Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` - Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` - Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` - Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` + Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` + CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` + Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` + Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` + MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Metrics) Reset() { *m = Metrics{} } @@ -50,7 +53,7 @@ func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -92,7 +95,7 @@ func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -132,7 +135,7 @@ func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -172,7 +175,7 @@ func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -215,7 +218,7 @@ func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -256,7 +259,7 @@ func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Throttle.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := 
m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -330,7 +333,7 @@ func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -372,7 +375,7 @@ func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -391,6 +394,47 @@ func (m *MemoryEntry) XXX_DiscardUnknown() { var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo +type MemoryOomControl struct { + OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` + UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` + OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} } +func (*MemoryOomControl) ProtoMessage() {} +func (*MemoryOomControl) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{8} +} +func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryOomControl) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryOomControl.Merge(m, src) +} +func (m *MemoryOomControl) XXX_Size() int { + return m.Size() +} +func (m *MemoryOomControl) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryOomControl.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo + type BlkIOStat struct { IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` @@ -408,7 +452,7 @@ type BlkIOStat struct { func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } func (*BlkIOStat) ProtoMessage() {} func (*BlkIOStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{8} + return fileDescriptor_a17b2d87c332bfaa, []int{9} } func (m *BlkIOStat) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -418,7 +462,7 @@ func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -451,7 +495,7 @@ type BlkIOEntry struct { func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } func (*BlkIOEntry) ProtoMessage() {} func (*BlkIOEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{9} + return fileDescriptor_a17b2d87c332bfaa, []int{10} } func (m *BlkIOEntry) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -461,7 +505,7 @@ func (m *BlkIOEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -491,7 +535,7 @@ type RdmaStat struct { func (m *RdmaStat) Reset() { *m = RdmaStat{} } func (*RdmaStat) ProtoMessage() {} func (*RdmaStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{10} + return fileDescriptor_a17b2d87c332bfaa, []int{11} } func (m *RdmaStat) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -501,7 +545,7 @@ func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -532,7 +576,7 @@ type RdmaEntry struct { func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } func (*RdmaEntry) ProtoMessage() {} func (*RdmaEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{11} + return fileDescriptor_a17b2d87c332bfaa, []int{12} } func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -542,7 +586,7 @@ func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -579,7 +623,7 @@ type NetworkStat struct { func (m *NetworkStat) Reset() { *m = NetworkStat{} } func (*NetworkStat) ProtoMessage() {} func (*NetworkStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{12} + return fileDescriptor_a17b2d87c332bfaa, []int{13} } func (m *NetworkStat) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -589,7 +633,7 @@ func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -608,6 +652,55 @@ func (m *NetworkStat) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkStat proto.InternalMessageInfo +// CgroupStats exports per-cgroup statistics. 
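// Editor's sketch (not part of the generated file): after this change a
// decoded v1 Metrics message can carry the new CgroupStats field added to
// the Metrics struct earlier in this file; the CgroupStats type itself is
// defined immediately below. proto is github.com/gogo/protobuf/proto, and
// data is assumed to hold a serialized Metrics payload. The nil check
// matters: older producers omit the field.
func runningTasks(data []byte) (uint64, error) {
	var m Metrics
	if err := proto.Unmarshal(data, &m); err != nil {
		return 0, err
	}
	if m.CgroupStats == nil {
		return 0, nil
	}
	return m.CgroupStats.NrRunning, nil
}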
+type CgroupStats struct { + // number of tasks sleeping + NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` + // number of tasks running + NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` + // number of tasks in stopped state + NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` + // number of tasks in uninterruptible state + NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` + // number of tasks waiting on IO + NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{14} +} +func (m *CgroupStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CgroupStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_CgroupStats.Merge(m, src) +} +func (m *CgroupStats) XXX_Size() int { + return m.Size() +} +func (m *CgroupStats) XXX_DiscardUnknown() { + xxx_messageInfo_CgroupStats.DiscardUnknown(m) +} + +var xxx_messageInfo_CgroupStats proto.InternalMessageInfo + func init() { proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") @@ -617,11 +710,13 @@ func init() { proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") + proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl") proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") + proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats") } func init() { @@ -629,111 +724,123 @@ func init() { } var fileDescriptor_a17b2d87c332bfaa = []byte{ - // 1558 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0xcf, 0x73, 0x13, 0x39, - 0x16, 0xc6, 0xb1, 0x13, 0xbb, 0x9f, 0x93, 0x90, 0x28, 0x10, 0x3a, 0x01, 0xe2, 0xe0, 0x24, 0xbb, - 0xd9, 0xa5, 0xca, 0x29, 0xd8, 0x2d, 0x6a, 0x61, 0xa1, 0xb6, 0x70, 0x80, 0x82, 0xda, 0xcd, 0x62, - 0xda, 0x49, 0xb1, 0x7b, 0xea, 0x92, 0xdb, 0xa2, 0xad, 0xc4, 0x6e, 0x35, 0x6a, 0xb5, 0xe3, 0xcc, - 0x69, 0x0e, 0x53, 0x35, 0xa7, 0xf9, 0x67, 0xe6, 0xaf, 0xe0, 0x38, 0x97, 0xa9, 0x9a, 0xb9, 0xa4, - 0x06, 0xff, 0x25, 0x53, 0x92, 0xfa, 0x87, 0x0c, 0x84, 0x8c, 0x6f, 0x2d, 0xe9, 0xfb, 0xbe, 0xf7, - 
0xf4, 0xfa, 0x53, 0xeb, 0x35, 0xfc, 0xdd, 0xa7, 0xa2, 0x17, 0x77, 0x1a, 0x1e, 0x1b, 0xec, 0x79, - 0x2c, 0x10, 0x98, 0x06, 0x84, 0x77, 0xf7, 0x3c, 0x9f, 0xb3, 0x38, 0x8c, 0xf6, 0x22, 0x81, 0x45, - 0xb4, 0x37, 0xbc, 0xb7, 0x37, 0x20, 0x82, 0x53, 0x2f, 0x6a, 0x84, 0x9c, 0x09, 0x86, 0x6c, 0xca, - 0x1a, 0x39, 0xba, 0x91, 0xa0, 0x1b, 0xc3, 0x7b, 0xeb, 0xd7, 0x7c, 0xe6, 0x33, 0x05, 0xda, 0x93, - 0x4f, 0x1a, 0x5f, 0xff, 0xb1, 0x08, 0xe5, 0x03, 0xad, 0x80, 0xfe, 0x05, 0xe5, 0x5e, 0xec, 0x13, - 0xd1, 0xef, 0xd8, 0x85, 0xcd, 0xe2, 0x6e, 0xf5, 0xfe, 0x4e, 0xe3, 0x22, 0xb5, 0xc6, 0x4b, 0x0d, - 0x6c, 0x0b, 0x2c, 0x9c, 0x94, 0x85, 0x1e, 0x40, 0x29, 0xa4, 0xdd, 0xc8, 0x9e, 0xd9, 0x2c, 0xec, - 0x56, 0xef, 0xd7, 0x2f, 0x66, 0xb7, 0x68, 0x37, 0x52, 0x54, 0x85, 0x47, 0x8f, 0xa1, 0xe8, 0x85, - 0xb1, 0x5d, 0x54, 0xb4, 0x3b, 0x17, 0xd3, 0xf6, 0x5b, 0x47, 0x92, 0xd5, 0x2c, 0x8f, 0xcf, 0x6b, - 0xc5, 0xfd, 0xd6, 0x91, 0x23, 0x69, 0xe8, 0x31, 0xcc, 0x0d, 0xc8, 0x80, 0xf1, 0x33, 0xbb, 0xa4, - 0x04, 0xb6, 0x2f, 0x16, 0x38, 0x50, 0x38, 0x15, 0x39, 0xe1, 0xa0, 0x87, 0x30, 0xdb, 0xe9, 0x9f, - 0x50, 0x66, 0xcf, 0x2a, 0xf2, 0xd6, 0xc5, 0xe4, 0x66, 0xff, 0xe4, 0xd5, 0x6b, 0xc5, 0xd5, 0x0c, - 0xb9, 0x5d, 0xde, 0x1d, 0x60, 0x7b, 0xee, 0xb2, 0xed, 0x3a, 0xdd, 0x01, 0xd6, 0xdb, 0x95, 0x78, - 0x59, 0xe7, 0x80, 0x88, 0x53, 0xc6, 0x4f, 0xec, 0xf2, 0x65, 0x75, 0xfe, 0xaf, 0x06, 0xea, 0x3a, - 0x27, 0xac, 0xfa, 0x09, 0x54, 0x8d, 0xfa, 0xa3, 0x6b, 0x30, 0x1b, 0x47, 0xd8, 0x27, 0x76, 0x61, - 0xb3, 0xb0, 0x5b, 0x72, 0xf4, 0x00, 0x2d, 0x41, 0x71, 0x80, 0x47, 0xea, 0x5d, 0x94, 0x1c, 0xf9, - 0x88, 0x6c, 0x28, 0xbf, 0xc3, 0xb4, 0xef, 0x05, 0x42, 0x95, 0xba, 0xe4, 0xa4, 0x43, 0xb4, 0x0e, - 0x95, 0x10, 0xfb, 0x24, 0xa2, 0xdf, 0x10, 0x55, 0x44, 0xcb, 0xc9, 0xc6, 0xf5, 0x47, 0x50, 0x49, - 0x5f, 0x97, 0x54, 0xf0, 0x62, 0xce, 0x49, 0x20, 0x92, 0x58, 0xe9, 0x50, 0xe6, 0xd0, 0xa7, 0x03, - 0x2a, 0x92, 0x78, 0x7a, 0x50, 0xff, 0xbe, 0x00, 0xe5, 0xe4, 0xa5, 0xa1, 0x7f, 0x98, 0x59, 0x7e, - 0xb5, 0x5c, 0xfb, 0xad, 0xa3, 0x23, 0x89, 0x4c, 0x77, 0xd2, 0x04, 0x10, 0x3d, 0xce, 0x84, 0xe8, - 0xd3, 0xc0, 0xbf, 0xdc, 0x5c, 0x87, 0x1a, 0x4b, 0x1c, 0x83, 0x55, 0x7f, 0x0f, 0x95, 0x54, 0x56, - 0xe6, 0x2a, 0x98, 0xc0, 0xfd, 0xb4, 0x5e, 0x6a, 0x80, 0x56, 0x61, 0xee, 0x84, 0xf0, 0x80, 0xf4, - 0x93, 0x2d, 0x24, 0x23, 0x84, 0xa0, 0x14, 0x47, 0x84, 0x27, 0x25, 0x53, 0xcf, 0x68, 0x0b, 0xca, - 0x21, 0xe1, 0xae, 0x34, 0x6d, 0x69, 0xb3, 0xb8, 0x5b, 0x6a, 0xc2, 0xf8, 0xbc, 0x36, 0xd7, 0x22, - 0x5c, 0x9a, 0x72, 0x2e, 0x24, 0x7c, 0x3f, 0x8c, 0xeb, 0x23, 0xa8, 0xa4, 0xa9, 0xc8, 0xc2, 0x85, - 0x84, 0x53, 0xd6, 0x8d, 0xd2, 0xc2, 0x25, 0x43, 0x74, 0x17, 0x96, 0x93, 0x34, 0x49, 0xd7, 0x4d, - 0x31, 0x3a, 0x83, 0xa5, 0x6c, 0xa1, 0x95, 0x80, 0x77, 0x60, 0x31, 0x07, 0x0b, 0x3a, 0x20, 0x49, - 0x56, 0x0b, 0xd9, 0xec, 0x21, 0x1d, 0x90, 0xfa, 0xaf, 0x55, 0x80, 0xdc, 0xea, 0x72, 0xbf, 0x1e, - 0xf6, 0x7a, 0x99, 0x3f, 0xd4, 0x00, 0xad, 0x41, 0x91, 0x47, 0x49, 0x28, 0x7d, 0xa2, 0x9c, 0x76, - 0xdb, 0x91, 0x73, 0xe8, 0x4f, 0x50, 0xe1, 0x51, 0xe4, 0xca, 0x63, 0xad, 0x03, 0x34, 0xab, 0xe3, - 0xf3, 0x5a, 0xd9, 0x69, 0xb7, 0xa5, 0xed, 0x9c, 0x32, 0x8f, 0x22, 0xf9, 0x80, 0x6a, 0x50, 0x1d, - 0xe0, 0x30, 0x24, 0x5d, 0xf7, 0x1d, 0xed, 0x6b, 0xe7, 0x94, 0x1c, 0xd0, 0x53, 0x2f, 0x68, 0x5f, - 0x55, 0xba, 0x4b, 0xb9, 0x38, 0x53, 0x87, 0xab, 0xe4, 0xe8, 0x01, 0xba, 0x05, 0xd6, 0x29, 0xa7, - 0x82, 0x74, 0xb0, 0x77, 0xa2, 0x0e, 0x4f, 0xc9, 0xc9, 0x27, 0x90, 0x0d, 0x95, 0xd0, 0x77, 0x43, - 0xdf, 0xa5, 0x81, 0x5d, 0xd6, 0x6f, 0x22, 0xf4, 0x5b, 0xfe, 0xab, 0x00, 0xad, 0x83, 0xa5, 0x57, - 0x58, 0x2c, 0xec, 0x4a, 
0x52, 0x46, 0xbf, 0xe5, 0xbf, 0x8e, 0x05, 0x5a, 0x53, 0xac, 0x77, 0x38, - 0xee, 0x0b, 0xdb, 0x4a, 0x97, 0x5e, 0xc8, 0x21, 0xda, 0x84, 0xf9, 0xd0, 0x77, 0x07, 0xf8, 0x38, - 0x59, 0x06, 0x9d, 0x66, 0xe8, 0x1f, 0xe0, 0x63, 0x8d, 0xd8, 0x82, 0x05, 0x1a, 0x60, 0x4f, 0xd0, - 0x21, 0x71, 0x71, 0xc0, 0x02, 0xbb, 0xaa, 0x20, 0xf3, 0xe9, 0xe4, 0xd3, 0x80, 0x05, 0x72, 0xb3, - 0x26, 0x64, 0x5e, 0xab, 0x18, 0x00, 0x53, 0x45, 0xd5, 0x63, 0x61, 0x52, 0x45, 0x55, 0x24, 0x57, - 0x51, 0x90, 0x45, 0x53, 0x45, 0x01, 0x36, 0xa1, 0x1a, 0x07, 0x64, 0x48, 0x3d, 0x81, 0x3b, 0x7d, - 0x62, 0x5f, 0x55, 0x00, 0x73, 0x0a, 0x3d, 0x82, 0xb5, 0x1e, 0x25, 0x1c, 0x73, 0xaf, 0x47, 0x3d, - 0xdc, 0x77, 0xf5, 0x87, 0xcc, 0xd5, 0xc7, 0x6f, 0x49, 0xe1, 0x6f, 0x98, 0x00, 0xed, 0x84, 0xff, - 0xc8, 0x65, 0xf4, 0x00, 0x26, 0x96, 0xdc, 0xe8, 0x14, 0x87, 0x09, 0x73, 0x59, 0x31, 0xaf, 0x9b, - 0xcb, 0xed, 0x53, 0x1c, 0x6a, 0x5e, 0x0d, 0xaa, 0xea, 0x94, 0xb8, 0xda, 0x48, 0x48, 0xa7, 0xad, - 0xa6, 0xf6, 0x95, 0x9b, 0xfe, 0x02, 0x96, 0x06, 0x48, 0x4f, 0xad, 0x28, 0xcf, 0xcc, 0x8f, 0xcf, - 0x6b, 0x95, 0x43, 0x39, 0x29, 0x8d, 0x55, 0x51, 0xcb, 0x4e, 0x14, 0xa1, 0x07, 0xb0, 0x98, 0x41, - 0xb5, 0xc7, 0xae, 0x29, 0xfc, 0xd2, 0xf8, 0xbc, 0x36, 0x9f, 0xe2, 0x95, 0xd1, 0xe6, 0x53, 0x8e, - 0x72, 0xdb, 0x5f, 0x61, 0x59, 0xf3, 0x4c, 0xcf, 0x5d, 0x57, 0x99, 0x5c, 0x55, 0x0b, 0x07, 0xb9, - 0xf1, 0xb2, 0x7c, 0xb5, 0xfd, 0x56, 0x8d, 0x7c, 0x9f, 0x29, 0x0f, 0xfe, 0x19, 0x34, 0xc7, 0xcd, - 0x9d, 0x78, 0x43, 0x81, 0x74, 0x6e, 0x6f, 0x33, 0x3b, 0x6e, 0xa5, 0xd9, 0x66, 0xa6, 0xb4, 0xf5, - 0x2b, 0x51, 0xb3, 0x2d, 0xed, 0xcc, 0x9d, 0x54, 0x2d, 0xf7, 0xe7, 0x9a, 0x7e, 0xf9, 0x19, 0x4a, - 0x9a, 0x74, 0xdb, 0xd0, 0xd2, 0x5e, 0x5c, 0x9f, 0x40, 0x69, 0x37, 0xde, 0x05, 0x94, 0xa1, 0x72, - 0xd7, 0xde, 0x34, 0x36, 0xda, 0xca, 0xad, 0xdb, 0x80, 0x15, 0x0d, 0x9e, 0x34, 0xf0, 0x2d, 0x85, - 0xd6, 0xf5, 0x7a, 0x65, 0xba, 0x38, 0x2b, 0xa2, 0x89, 0xbe, 0x6d, 0x68, 0x3f, 0xcd, 0xb1, 0x9f, - 0x6b, 0xab, 0x92, 0x6f, 0x7c, 0x41, 0x5b, 0x15, 0xfd, 0x53, 0x6d, 0x85, 0xae, 0x7d, 0xa6, 0xad, - 0xb0, 0x77, 0x53, 0xac, 0x69, 0xf6, 0xcd, 0xe4, 0xb3, 0x27, 0x17, 0x8e, 0x0c, 0xc7, 0xff, 0x33, - 0xbd, 0x3a, 0xee, 0xa8, 0x6f, 0xff, 0xce, 0x65, 0x17, 0xfc, 0xf3, 0x40, 0xf0, 0xb3, 0xf4, 0xf6, - 0x78, 0x08, 0x25, 0xe9, 0x72, 0xbb, 0x3e, 0x0d, 0x57, 0x51, 0xd0, 0x93, 0xec, 0x4a, 0xd8, 0x9a, - 0x86, 0x9c, 0xde, 0x1c, 0x6d, 0x00, 0xfd, 0xe4, 0x0a, 0x2f, 0xb4, 0xb7, 0xa7, 0x90, 0x68, 0x2e, - 0x8c, 0xcf, 0x6b, 0xd6, 0xbf, 0x15, 0xf9, 0x70, 0xbf, 0xe5, 0x58, 0x5a, 0xe7, 0xd0, 0x0b, 0xeb, - 0x04, 0xaa, 0x06, 0x30, 0xbf, 0x77, 0x0b, 0xc6, 0xbd, 0x9b, 0x77, 0x04, 0x33, 0x5f, 0xe8, 0x08, - 0x8a, 0x5f, 0xec, 0x08, 0x4a, 0x13, 0x1d, 0x41, 0xfd, 0xe7, 0x59, 0xb0, 0xb2, 0x86, 0x07, 0x61, - 0x58, 0xa7, 0xcc, 0x8d, 0x08, 0x1f, 0x52, 0x8f, 0xb8, 0x9d, 0x33, 0x41, 0x22, 0x97, 0x13, 0x2f, - 0xe6, 0x11, 0x1d, 0x92, 0xa4, 0x59, 0xdc, 0xbe, 0xa4, 0x73, 0xd2, 0xb5, 0xb9, 0x41, 0x59, 0x5b, - 0xcb, 0x34, 0xa5, 0x8a, 0x93, 0x8a, 0xa0, 0xff, 0xc1, 0xf5, 0x3c, 0x44, 0xd7, 0x50, 0x9f, 0x99, - 0x42, 0x7d, 0x25, 0x53, 0xef, 0xe6, 0xca, 0x87, 0xb0, 0x42, 0x99, 0xfb, 0x3e, 0x26, 0xf1, 0x84, - 0x6e, 0x71, 0x0a, 0xdd, 0x65, 0xca, 0xde, 0x28, 0x7e, 0xae, 0xea, 0xc2, 0x9a, 0x51, 0x12, 0x79, - 0x17, 0x1b, 0xda, 0xa5, 0x29, 0xb4, 0x57, 0xb3, 0x9c, 0xe5, 0xdd, 0x9d, 0x07, 0xf8, 0x3f, 0xac, - 0x52, 0xe6, 0x9e, 0x62, 0x2a, 0x3e, 0x55, 0x9f, 0x9d, 0xae, 0x22, 0x6f, 0x31, 0x15, 0x93, 0xd2, - 0xba, 0x22, 0x03, 0xc2, 0xfd, 0x89, 0x8a, 0xcc, 0x4d, 0x57, 0x91, 0x03, 0xc5, 0xcf, 0x55, 0x5b, - 0xb0, 0x4c, 0xd9, 0xa7, 0xb9, 0x96, 0xa7, 0xd0, 
0xbc, 0x4a, 0xd9, 0x64, 0x9e, 0x6f, 0x60, 0x39, - 0x22, 0x9e, 0x60, 0xdc, 0x74, 0x5b, 0x65, 0x0a, 0xc5, 0xa5, 0x84, 0x9e, 0x49, 0xd6, 0x87, 0x00, - 0xf9, 0x3a, 0x5a, 0x84, 0x19, 0x16, 0xaa, 0xa3, 0x63, 0x39, 0x33, 0x2c, 0x94, 0x3d, 0x60, 0x57, - 0x7e, 0x76, 0xf4, 0xc1, 0xb1, 0x9c, 0x64, 0x24, 0xcf, 0xd3, 0x00, 0x1f, 0xb3, 0xb4, 0x09, 0xd4, - 0x03, 0x35, 0x4b, 0x03, 0xc6, 0x93, 0xb3, 0xa3, 0x07, 0x72, 0x76, 0x88, 0xfb, 0x31, 0x49, 0x7b, - 0x1e, 0x35, 0xa8, 0x7f, 0x57, 0x80, 0x4a, 0xfa, 0x1b, 0x80, 0x9e, 0x98, 0x6d, 0x74, 0xf1, 0xeb, - 0x7f, 0x1d, 0x92, 0xa4, 0x37, 0x93, 0xf5, 0xda, 0x0f, 0xf3, 0x5e, 0xfb, 0x0f, 0x93, 0x93, 0x86, - 0x9c, 0x80, 0x95, 0xcd, 0x19, 0xbb, 0x2d, 0x4c, 0xec, 0xb6, 0x06, 0xd5, 0x9e, 0x87, 0xdd, 0x1e, - 0x0e, 0xba, 0x7d, 0xa2, 0x3b, 0xc4, 0x05, 0x07, 0x7a, 0x1e, 0x7e, 0xa9, 0x67, 0x52, 0x00, 0xeb, - 0x1c, 0x13, 0x4f, 0x44, 0xaa, 0x28, 0x1a, 0xf0, 0x5a, 0xcf, 0xd4, 0x7f, 0x98, 0x81, 0xaa, 0xf1, - 0xe7, 0x22, 0x7b, 0xe8, 0x00, 0x0f, 0xd2, 0x38, 0xea, 0x59, 0x76, 0x6c, 0x7c, 0xa4, 0xbf, 0x25, - 0xc9, 0x67, 0xaa, 0xcc, 0x47, 0xea, 0xa3, 0x80, 0x6e, 0x03, 0xf0, 0x91, 0x1b, 0x62, 0xef, 0x84, - 0x24, 0xf2, 0x25, 0xc7, 0xe2, 0xa3, 0x96, 0x9e, 0x40, 0x37, 0xc1, 0xe2, 0x23, 0x97, 0x70, 0xce, - 0x78, 0x94, 0xd4, 0xbe, 0xc2, 0x47, 0xcf, 0xd5, 0x38, 0xe1, 0x76, 0x39, 0x93, 0xbd, 0x40, 0xf2, - 0x0e, 0x2c, 0x3e, 0x7a, 0xa6, 0x27, 0x64, 0x54, 0x91, 0x46, 0xd5, 0xad, 0x67, 0x59, 0xe4, 0x51, - 0x45, 0x1e, 0x55, 0xb7, 0x9e, 0x96, 0x30, 0xa3, 0x8a, 0x2c, 0xaa, 0xee, 0x3e, 0x2b, 0xc2, 0x88, - 0x2a, 0xf2, 0xa8, 0x56, 0xca, 0x4d, 0xa2, 0x36, 0xed, 0x0f, 0x1f, 0x37, 0xae, 0xfc, 0xf2, 0x71, - 0xe3, 0xca, 0xb7, 0xe3, 0x8d, 0xc2, 0x87, 0xf1, 0x46, 0xe1, 0xa7, 0xf1, 0x46, 0xe1, 0xb7, 0xf1, - 0x46, 0xa1, 0x33, 0xa7, 0x7e, 0xc3, 0xff, 0xf6, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xc0, - 0x49, 0x92, 0xee, 0x0f, 0x00, 0x00, + // 1749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6, + 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb, + 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 0x4a, 0x59, 0x5a, 0xbb, 0x76, 0xcb, + 0x51, 0x44, 0x83, 0x52, 0xd9, 0x39, 0xa1, 0x40, 0x70, 0x16, 0x9c, 0x15, 0x80, 0x81, 0x07, 0x03, + 0x89, 0xca, 0x29, 0x87, 0x54, 0xe5, 0x94, 0x07, 0xca, 0x1b, 0xf8, 0x98, 0x4b, 0x52, 0xc9, 0x45, + 0x15, 0xf3, 0x49, 0x52, 0x33, 0x3d, 0xf8, 0xa1, 0xbc, 0x5a, 0x85, 0x37, 0x76, 0xcf, 0xd7, 0x5f, + 0xf7, 0x34, 0xbe, 0x19, 0x34, 0x08, 0xbf, 0x0e, 0x99, 0x1c, 0xe7, 0xc3, 0xbd, 0x80, 0xc7, 0xfb, + 0x01, 0x4f, 0xa4, 0xcf, 0x12, 0x2a, 0x46, 0xfb, 0x41, 0x28, 0x78, 0x9e, 0x66, 0xfb, 0x99, 0xf4, + 0x65, 0xb6, 0x7f, 0xfe, 0xf1, 0x7e, 0x4c, 0xa5, 0x60, 0x41, 0xb6, 0x97, 0x0a, 0x2e, 0x39, 0x71, + 0x18, 0xdf, 0xab, 0xd0, 0x7b, 0x06, 0xbd, 0x77, 0xfe, 0xf1, 0xe6, 0xbd, 0x90, 0x87, 0x5c, 0x83, + 0xf6, 0xd5, 0x2f, 0xc4, 0xf7, 0xfe, 0x65, 0x41, 0xeb, 0x08, 0x19, 0xc8, 0xef, 0xa0, 0x35, 0xce, + 0x43, 0x2a, 0xa3, 0xa1, 0xd3, 0xd8, 0x6e, 0xee, 0x76, 0x3e, 0x79, 0xb2, 0x77, 0x13, 0xdb, 0xde, + 0x4b, 0x04, 0x0e, 0xa4, 0x2f, 0xdd, 0x22, 0x8a, 0x3c, 0x03, 0x2b, 0x65, 0xa3, 0xcc, 0x59, 0xd8, + 0x6e, 0xec, 0x76, 0x3e, 0xe9, 0xdd, 0x1c, 0xdd, 0x67, 0xa3, 0x4c, 0x87, 0x6a, 0x3c, 0xf9, 0x0c, + 0x9a, 0x41, 0x9a, 0x3b, 0x4d, 0x1d, 0xf6, 0xe1, 0xcd, 0x61, 0x87, 0xfd, 0x53, 0x15, 0x75, 0xd0, + 0x9a, 0x5e, 0x75, 0x9b, 0x87, 0xfd, 0x53, 0x57, 0x85, 0x91, 0xcf, 0x60, 0x29, 0xa6, 0x31, 0x17, + 0x97, 0x8e, 0xa5, 0x09, 0x1e, 0xdf, 0x4c, 0x70, 0xa4, 0x71, 0x3a, 0xb3, 0x89, 0x21, 
0xcf, 0x61, + 0x71, 0x18, 0x9d, 0x31, 0xee, 0x2c, 0xea, 0xe0, 0x9d, 0x9b, 0x83, 0x0f, 0xa2, 0xb3, 0x57, 0xc7, + 0x3a, 0x16, 0x23, 0xd4, 0x76, 0xc5, 0x28, 0xf6, 0x9d, 0xa5, 0xdb, 0xb6, 0xeb, 0x8e, 0x62, 0x1f, + 0xb7, 0xab, 0xf0, 0xaa, 0xcf, 0x09, 0x95, 0x17, 0x5c, 0x9c, 0x39, 0xad, 0xdb, 0xfa, 0xfc, 0x07, + 0x04, 0x62, 0x9f, 0x4d, 0x14, 0x79, 0x09, 0xcb, 0x08, 0xf1, 0xb4, 0x0a, 0x9c, 0xb6, 0x2e, 0xe0, + 0x1d, 0x2c, 0x87, 0xfa, 0xa7, 0x22, 0xc9, 0xdc, 0x4e, 0x50, 0x19, 0xe4, 0x3b, 0x20, 0xd8, 0x07, + 0x8f, 0xf3, 0xd8, 0x53, 0xc1, 0x82, 0x47, 0x8e, 0xad, 0xf9, 0x3e, 0xba, 0xad, 0x8f, 0xc7, 0x3c, + 0x3e, 0xc4, 0x08, 0x77, 0x2d, 0xbe, 0xe6, 0xe9, 0x9d, 0x41, 0xa7, 0xa6, 0x11, 0x72, 0x0f, 0x16, + 0xf3, 0xcc, 0x0f, 0xa9, 0xd3, 0xd8, 0x6e, 0xec, 0x5a, 0x2e, 0x1a, 0x64, 0x0d, 0x9a, 0xb1, 0x3f, + 0xd1, 0x7a, 0xb1, 0x5c, 0xf5, 0x93, 0x38, 0xd0, 0x7a, 0xed, 0xb3, 0x28, 0x48, 0xa4, 0x96, 0x83, + 0xe5, 0x16, 0x26, 0xd9, 0x84, 0x76, 0xea, 0x87, 0x34, 0x63, 0x7f, 0xa2, 0xfa, 0x41, 0xdb, 0x6e, + 0x69, 0xf7, 0x3e, 0x85, 0x76, 0x21, 0x29, 0xc5, 0x10, 0xe4, 0x42, 0xd0, 0x44, 0x9a, 0x5c, 0x85, + 0xa9, 0x6a, 0x88, 0x58, 0xcc, 0xa4, 0xc9, 0x87, 0x46, 0xef, 0xaf, 0x0d, 0x68, 0x19, 0x61, 0x91, + 0xdf, 0xd4, 0xab, 0x7c, 0xe7, 0x23, 0x3d, 0xec, 0x9f, 0x9e, 0x2a, 0x64, 0xb1, 0x93, 0x03, 0x00, + 0x39, 0x16, 0x5c, 0xca, 0x88, 0x25, 0xe1, 0xed, 0x07, 0xe0, 0x04, 0xb1, 0xd4, 0xad, 0x45, 0xf5, + 0xbe, 0x87, 0x76, 0x41, 0xab, 0x6a, 0x95, 0x5c, 0xfa, 0x51, 0xd1, 0x2f, 0x6d, 0x90, 0x07, 0xb0, + 0x74, 0x46, 0x45, 0x42, 0x23, 0xb3, 0x05, 0x63, 0x11, 0x02, 0x56, 0x9e, 0x51, 0x61, 0x5a, 0xa6, + 0x7f, 0x93, 0x1d, 0x68, 0xa5, 0x54, 0x78, 0xea, 0x60, 0x59, 0xdb, 0xcd, 0x5d, 0xeb, 0x00, 0xa6, + 0x57, 0xdd, 0xa5, 0x3e, 0x15, 0xea, 0xe0, 0x2c, 0xa5, 0x54, 0x1c, 0xa6, 0x79, 0x6f, 0x02, 0xed, + 0xa2, 0x14, 0xd5, 0xb8, 0x94, 0x0a, 0xc6, 0x47, 0x59, 0xd1, 0x38, 0x63, 0x92, 0xa7, 0xb0, 0x6e, + 0xca, 0xa4, 0x23, 0xaf, 0xc0, 0x60, 0x05, 0x6b, 0xe5, 0x42, 0xdf, 0x80, 0x9f, 0xc0, 0x6a, 0x05, + 0x96, 0x2c, 0xa6, 0xa6, 0xaa, 0x95, 0xd2, 0x7b, 0xc2, 0x62, 0xda, 0xfb, 0x4f, 0x07, 0xa0, 0x3a, + 0x8e, 0x6a, 0xbf, 0x81, 0x1f, 0x8c, 0x4b, 0x7d, 0x68, 0x83, 0x6c, 0x40, 0x53, 0x64, 0x26, 0x15, + 0x9e, 0x7a, 0x77, 0x30, 0x70, 0x95, 0x8f, 0xfc, 0x0c, 0xda, 0x22, 0xcb, 0x3c, 0x75, 0xf5, 0x60, + 0x82, 0x83, 0xce, 0xf4, 0xaa, 0xdb, 0x72, 0x07, 0x03, 0x25, 0x3b, 0xb7, 0x25, 0xb2, 0x4c, 0xfd, + 0x20, 0x5d, 0xe8, 0xc4, 0x7e, 0x9a, 0xd2, 0x91, 0xf7, 0x9a, 0x45, 0xa8, 0x1c, 0xcb, 0x05, 0x74, + 0x7d, 0xc5, 0x22, 0xdd, 0xe9, 0x11, 0x13, 0xf2, 0x52, 0x5f, 0x00, 0x96, 0x8b, 0x06, 0x79, 0x04, + 0xf6, 0x85, 0x60, 0x92, 0x0e, 0xfd, 0xe0, 0x4c, 0x1f, 0x70, 0xcb, 0xad, 0x1c, 0xc4, 0x81, 0x76, + 0x1a, 0x7a, 0x69, 0xe8, 0xb1, 0xc4, 0x69, 0xe1, 0x93, 0x48, 0xc3, 0x7e, 0xf8, 0x2a, 0x21, 0x9b, + 0x60, 0xe3, 0x0a, 0xcf, 0xa5, 0x3e, 0x97, 0xaa, 0x8d, 0x61, 0x3f, 0x3c, 0xce, 0x25, 0xd9, 0xd0, + 0x51, 0xaf, 0xfd, 0x3c, 0x92, 0xfa, 0x88, 0xe9, 0xa5, 0xaf, 0x94, 0x49, 0xb6, 0x61, 0x39, 0x0d, + 0xbd, 0xd8, 0x7f, 0x63, 0x96, 0x01, 0xcb, 0x4c, 0xc3, 0x23, 0xff, 0x0d, 0x22, 0x76, 0x60, 0x85, + 0x25, 0x7e, 0x20, 0xd9, 0x39, 0xf5, 0xfc, 0x84, 0x27, 0x4e, 0x47, 0x43, 0x96, 0x0b, 0xe7, 0x17, + 0x09, 0x4f, 0xd4, 0x66, 0xeb, 0x90, 0x65, 0x64, 0xa9, 0x01, 0xea, 0x2c, 0xba, 0x1f, 0x2b, 0xb3, + 0x2c, 0xba, 0x23, 0x15, 0x8b, 0x86, 0xac, 0xd6, 0x59, 0x34, 0x60, 0x1b, 0x3a, 0x79, 0x42, 0xcf, + 0x59, 0x20, 0xfd, 0x61, 0x44, 0x9d, 0x3b, 0x1a, 0x50, 0x77, 0x91, 0x4f, 0x61, 0x63, 0xcc, 0xa8, + 0xf0, 0x45, 0x30, 0x66, 0x81, 0x1f, 0x79, 0xe6, 0x92, 0xc1, 0xe3, 0xb7, 0xa6, 0xf1, 0x0f, 0xeb, + 0x00, 0x54, 
0xc2, 0xef, 0xd5, 0x32, 0x79, 0x06, 0x33, 0x4b, 0x5e, 0x76, 0xe1, 0xa7, 0x26, 0x72, + 0x5d, 0x47, 0xde, 0xaf, 0x2f, 0x0f, 0x2e, 0xfc, 0x14, 0xe3, 0xba, 0xd0, 0xd1, 0xa7, 0xc4, 0x43, + 0x21, 0x11, 0x2c, 0x5b, 0xbb, 0x0e, 0xb5, 0x9a, 0x7e, 0x01, 0x36, 0x02, 0x94, 0xa6, 0xee, 0x6a, + 0xcd, 0x2c, 0x4f, 0xaf, 0xba, 0xed, 0x13, 0xe5, 0x54, 0xc2, 0x6a, 0xeb, 0x65, 0x37, 0xcb, 0xc8, + 0x33, 0x58, 0x2d, 0xa1, 0xa8, 0xb1, 0x7b, 0x1a, 0xbf, 0x36, 0xbd, 0xea, 0x2e, 0x17, 0x78, 0x2d, + 0xb4, 0xe5, 0x22, 0x46, 0xab, 0xed, 0x23, 0x58, 0xc7, 0xb8, 0xba, 0xe6, 0xee, 0xeb, 0x4a, 0xee, + 0xe8, 0x85, 0xa3, 0x4a, 0x78, 0x65, 0xbd, 0x28, 0xbf, 0x07, 0xb5, 0x7a, 0x5f, 0x68, 0x0d, 0xfe, + 0x1c, 0x30, 0xc6, 0xab, 0x94, 0xf8, 0x50, 0x83, 0xb0, 0xb6, 0x6f, 0x4b, 0x39, 0xee, 0x14, 0xd5, + 0x96, 0xa2, 0x74, 0xf0, 0x91, 0x68, 0x6f, 0x1f, 0x95, 0xf9, 0xa4, 0x60, 0xab, 0xf4, 0xb9, 0x81, + 0x0f, 0xbf, 0x44, 0x29, 0x91, 0x3e, 0xae, 0x71, 0xa1, 0x16, 0x37, 0x67, 0x50, 0xa8, 0xc6, 0xa7, + 0x40, 0x4a, 0x54, 0xa5, 0xda, 0xf7, 0x6b, 0x1b, 0xed, 0x57, 0xd2, 0xdd, 0x83, 0xbb, 0x08, 0x9e, + 0x15, 0xf0, 0x23, 0x8d, 0xc6, 0x7e, 0xbd, 0xaa, 0xab, 0xb8, 0x6c, 0x62, 0x1d, 0xfd, 0x41, 0x8d, + 0xfb, 0x8b, 0x0a, 0xfb, 0x53, 0x6e, 0xdd, 0xf2, 0xad, 0xb7, 0x70, 0xeb, 0xa6, 0x5f, 0xe7, 0xd6, + 0xe8, 0xee, 0x4f, 0xb8, 0x35, 0xf6, 0x69, 0x81, 0xad, 0x8b, 0x7d, 0xdb, 0x5c, 0x7b, 0x6a, 0xe1, + 0xb4, 0xa6, 0xf8, 0xdf, 0x16, 0xaf, 0x8e, 0x0f, 0x6f, 0x7b, 0x19, 0xa3, 0xd6, 0xbf, 0x4c, 0xa4, + 0xb8, 0x2c, 0xde, 0x1e, 0xcf, 0xc1, 0x52, 0x2a, 0x77, 0x7a, 0xf3, 0xc4, 0xea, 0x10, 0xf2, 0x79, + 0xf9, 0x4a, 0xd8, 0x99, 0x27, 0xb8, 0x78, 0x73, 0x0c, 0x00, 0xf0, 0x97, 0x27, 0x83, 0xd4, 0x79, + 0x3c, 0x07, 0xc5, 0xc1, 0xca, 0xf4, 0xaa, 0x6b, 0x7f, 0xad, 0x83, 0x4f, 0x0e, 0xfb, 0xae, 0x8d, + 0x3c, 0x27, 0x41, 0xda, 0xa3, 0xd0, 0xa9, 0x01, 0xab, 0xf7, 0x6e, 0xa3, 0xf6, 0xde, 0xad, 0x26, + 0x82, 0x85, 0xb7, 0x4c, 0x04, 0xcd, 0xb7, 0x4e, 0x04, 0xd6, 0xcc, 0x44, 0xd0, 0x93, 0xb0, 0x76, + 0x7d, 0x10, 0x21, 0xbb, 0xb0, 0xa6, 0x26, 0x99, 0x33, 0x16, 0xa9, 0x73, 0x95, 0xe9, 0x47, 0x86, + 0x69, 0x57, 0x39, 0x8f, 0xbf, 0x66, 0x51, 0xf4, 0x02, 0xbd, 0xe4, 0x7d, 0xb0, 0xf3, 0x64, 0x44, + 0x85, 0x9a, 0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e, + 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a, + 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c, + 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 0x07, 0x8a, 0xc5, 0x2d, + 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f, + 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7, + 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35, + 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c, + 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15, + 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5, + 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2, + 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15, + 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71, + 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1, + 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 
0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5, + 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8, + 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe, + 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5, + 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99, + 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f, + 0xfc, 0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c, + 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16, + 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26, + 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87, + 0xca, 0x2a, 0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 0x71, 0xe0, 0xb5, 0x65, + 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26, + 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9, + 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52, + 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f, + 0xd0, 0x41, 0x7e, 0x09, 0x24, 0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61, + 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac, + 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5, + 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e, + 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24, + 0x22, 0xc4, 0x11, 0x00, 0x00, } func (m *Metrics) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -741,94 +848,138 @@ func (m *Metrics) Marshal() (dAtA []byte, err error) { } func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hugetlb) > 0 { - for _, msg := range m.Hugetlb { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MemoryOomControl != nil { + { + size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x4a } - if m.Pids != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Pids.Size())) - n1, err := m.Pids.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CgroupStats != nil { + { + size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x42 } - if m.CPU != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.CPU.Size())) - n2, err := m.CPU.MarshalTo(dAtA[i:]) - if err != 
nil { - return 0, err + if len(m.Network) > 0 { + for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - i += n2 } - if m.Memory != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Memory.Size())) - n3, err := m.Memory.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Rdma != nil { + { + size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x32 } if m.Blkio != nil { + { + size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x2a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Blkio.Size())) - n4, err := m.Blkio.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x22 } - if m.Rdma != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Rdma.Size())) - n5, err := m.Rdma.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CPU != nil { + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x1a } - if len(m.Network) > 0 { - for _, msg := range m.Network { - dAtA[i] = 0x3a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Pids != nil { + { + size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Hugetlb) > 0 { + for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -836,41 +987,48 @@ func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { } func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HugetlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Usage != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if m.Max != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + if len(m.Pagesize) > 0 { + i -= len(m.Pagesize) + copy(dAtA[i:], m.Pagesize) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) + i-- + dAtA[i] = 0x22 } if m.Failcnt != 0 { - dAtA[i] = 0x18 - i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] 
= 0x18 } - if len(m.Pagesize) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) - i += copy(dAtA[i:], m.Pagesize) + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *PidsStat) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -878,30 +1036,36 @@ func (m *PidsStat) Marshal() (dAtA []byte, err error) { } func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Current != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } if m.Limit != 0 { - dAtA[i] = 0x10 - i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if m.Current != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *CPUStat) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -909,40 +1073,50 @@ func (m *CPUStat) Marshal() (dAtA []byte, err error) { } func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Usage != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n6, err := m.Usage.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } if m.Throttling != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size())) - n7, err := m.Throttling.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x12 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *CPUUsage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -950,52 +1124,59 @@ func (m *CPUUsage) Marshal() (dAtA []byte, err error) { } func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUUsage) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Total != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) - } - if m.Kernel != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) - } - if m.User != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.User)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } if len(m.PerCPU) > 0 { - dAtA9 := make([]byte, len(m.PerCPU)*10) - var j8 int + dAtA11 := make([]byte, len(m.PerCPU)*10) + var j10 int for _, num := range m.PerCPU { for num >= 1<<7 { - dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) + dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j8++ + j10++ } - dAtA9[j8] = uint8(num) - j8++ + dAtA11[j10] = uint8(num) + j10++ } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintMetrics(dAtA, i, uint64(j10)) + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(j8)) - i += copy(dAtA[i:], dAtA9[:j8]) } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if m.User != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.User)) + i-- + dAtA[i] = 0x18 + } + if m.Kernel != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) + i-- + dAtA[i] = 0x10 + } + if m.Total != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Throttle) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1003,35 +1184,41 @@ func (m *Throttle) Marshal() (dAtA []byte, err error) { } func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Periods != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) - } - if m.ThrottledPeriods != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } if m.ThrottledTime != 0 { - dAtA[i] = 0x18 - i++ i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) + i-- + dAtA[i] = 0x18 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if m.ThrottledPeriods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) + i-- + dAtA[i] = 0x10 + } + if m.Periods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *MemoryStat) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1039,262 +1226,276 @@ func (m *MemoryStat) Marshal() (dAtA []byte, err error) { } func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Cache != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) - } - if m.RSS != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) + if 
m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if m.RSSHuge != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) + if m.KernelTCP != nil { + { + size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 } - if m.MappedFile != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) + if m.Kernel != nil { + { + size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a } - if m.Dirty != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) - } - if m.Writeback != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) - } - if m.PgPgIn != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) - } - if m.PgPgOut != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) - } - if m.PgFault != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) - } - if m.PgMajFault != 0 { - dAtA[i] = 0x50 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) - } - if m.InactiveAnon != 0 { - dAtA[i] = 0x58 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) - } - if m.ActiveAnon != 0 { - dAtA[i] = 0x60 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) - } - if m.InactiveFile != 0 { - dAtA[i] = 0x68 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) - } - if m.ActiveFile != 0 { - dAtA[i] = 0x70 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) + if m.Swap != nil { + { + size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 } - if m.Unevictable != 0 { - dAtA[i] = 0x78 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a } - if m.HierarchicalMemoryLimit != 0 { + if m.TotalUnevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) + i-- + dAtA[i] = 0x2 + i-- dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) } - if m.HierarchicalSwapLimit != 0 { - dAtA[i] = 0x88 - i++ + if m.TotalActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) + i-- + dAtA[i] = 0xf8 } - if m.TotalCache != 0 { - dAtA[i] = 0x90 - i++ + if m.TotalInactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) + i-- + dAtA[i] = 0xf0 } - if m.TotalRSS != 0 { - dAtA[i] = 0x98 - i++ + if m.TotalActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) + i-- + dAtA[i] = 0xe8 } - if m.TotalRSSHuge != 0 { - dAtA[i] = 0xa0 - i++ + if m.TotalInactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, 
i, uint64(m.TotalInactiveAnon)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) + i-- + dAtA[i] = 0xe0 } - if m.TotalMappedFile != 0 { - dAtA[i] = 0xa8 - i++ + if m.TotalPgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) + i-- + dAtA[i] = 0xd8 } - if m.TotalDirty != 0 { - dAtA[i] = 0xb0 - i++ + if m.TotalPgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) + i-- + dAtA[i] = 0xd0 } - if m.TotalWriteback != 0 { - dAtA[i] = 0xb8 - i++ + if m.TotalPgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) + i-- + dAtA[i] = 0xc8 } if m.TotalPgPgIn != 0 { - dAtA[i] = 0xc0 - i++ - dAtA[i] = 0x1 - i++ i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 } - if m.TotalPgPgOut != 0 { - dAtA[i] = 0xc8 - i++ + if m.TotalWriteback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) + i-- + dAtA[i] = 0xb8 } - if m.TotalPgFault != 0 { - dAtA[i] = 0xd0 - i++ + if m.TotalDirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) + i-- + dAtA[i] = 0xb0 } - if m.TotalPgMajFault != 0 { - dAtA[i] = 0xd8 - i++ + if m.TotalMappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) + i-- + dAtA[i] = 0xa8 } - if m.TotalInactiveAnon != 0 { - dAtA[i] = 0xe0 - i++ + if m.TotalRSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) + i-- + dAtA[i] = 0xa0 } - if m.TotalActiveAnon != 0 { - dAtA[i] = 0xe8 - i++ + if m.TotalRSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) + i-- + dAtA[i] = 0x98 } - if m.TotalInactiveFile != 0 { - dAtA[i] = 0xf0 - i++ + if m.TotalCache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) + i-- + dAtA[i] = 0x90 } - if m.TotalActiveFile != 0 { - dAtA[i] = 0xf8 - i++ + if m.HierarchicalSwapLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) + i-- + dAtA[i] = 0x88 } - if m.TotalUnevictable != 0 { + if m.HierarchicalMemoryLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) + i-- + dAtA[i] = 0x1 + i-- dAtA[i] = 0x80 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) } - if m.Usage != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n10, err := m.Usage.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 + if m.Unevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) + i-- + dAtA[i] = 0x78 } - if m.Swap != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, 
uint64(m.Swap.Size())) - n11, err := m.Swap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 + if m.ActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) + i-- + dAtA[i] = 0x70 } - if m.Kernel != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size())) - n12, err := m.Kernel.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 + if m.InactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) + i-- + dAtA[i] = 0x68 } - if m.KernelTCP != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size())) - n13, err := m.KernelTCP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 + if m.ActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) + i-- + dAtA[i] = 0x60 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if m.InactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) + i-- + dAtA[i] = 0x58 + } + if m.PgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) + i-- + dAtA[i] = 0x50 + } + if m.PgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) + i-- + dAtA[i] = 0x48 + } + if m.PgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) + i-- + dAtA[i] = 0x40 } - return i, nil + if m.PgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) + i-- + dAtA[i] = 0x38 + } + if m.Writeback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) + i-- + dAtA[i] = 0x30 + } + if m.Dirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) + i-- + dAtA[i] = 0x28 + } + if m.MappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) + i-- + dAtA[i] = 0x20 + } + if m.RSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) + i-- + dAtA[i] = 0x18 + } + if m.RSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) + i-- + dAtA[i] = 0x10 + } + if m.Cache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1302,40 +1503,88 @@ func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { } func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Limit != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if m.Usage != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x20 } if m.Max != 0 { - dAtA[i] = 0x18 - i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x18 } - if m.Failcnt != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x10 } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m 
*MemoryOomControl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.OomKill != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) + i-- + dAtA[i] = 0x18 + } + if m.UnderOom != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom)) + i-- + dAtA[i] = 0x10 } - return i, nil + if m.OomKillDisable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1343,116 +1592,138 @@ func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { } func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.IoServiceBytesRecursive) > 0 { - for _, msg := range m.IoServiceBytesRecursive { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.IoServicedRecursive) > 0 { - for _, msg := range m.IoServicedRecursive { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.SectorsRecursive) > 0 { + for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x42 } } - if len(m.IoQueuedRecursive) > 0 { - for _, msg := range m.IoQueuedRecursive { - dAtA[i] = 0x1a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.IoTimeRecursive) > 0 { + for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x3a } } - if len(m.IoServiceTimeRecursive) > 0 { - for _, msg := range m.IoServiceTimeRecursive { - dAtA[i] = 0x22 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.IoMergedRecursive) > 0 { + for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } if 
len(m.IoWaitTimeRecursive) > 0 { - for _, msg := range m.IoWaitTimeRecursive { - dAtA[i] = 0x2a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.IoMergedRecursive) > 0 { - for _, msg := range m.IoMergedRecursive { - dAtA[i] = 0x32 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.IoServiceTimeRecursive) > 0 { + for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - if len(m.IoTimeRecursive) > 0 { - for _, msg := range m.IoTimeRecursive { - dAtA[i] = 0x3a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.IoQueuedRecursive) > 0 { + for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.SectorsRecursive) > 0 { - for _, msg := range m.SectorsRecursive { - dAtA[i] = 0x42 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.IoServicedRecursive) > 0 { + for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if len(m.IoServiceBytesRecursive) > 0 { + for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1460,47 +1731,55 @@ func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { } func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Op) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) - i += copy(dAtA[i:], m.Op) - } - if len(m.Device) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i += copy(dAtA[i:], m.Device) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if m.Major != 0 { - 
dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) + if m.Value != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x28 } if m.Minor != 0 { - dAtA[i] = 0x20 - i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) + i-- + dAtA[i] = 0x20 } - if m.Value != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) + if m.Major != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) + i-- + dAtA[i] = 0x18 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x12 + } + if len(m.Op) > 0 { + i -= len(m.Op) + copy(dAtA[i:], m.Op) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RdmaStat) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1508,44 +1787,54 @@ func (m *RdmaStat) Marshal() (dAtA []byte, err error) { } func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Current) > 0 { - for _, msg := range m.Current { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } if len(m.Limit) > 0 { - for _, msg := range m.Limit { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Current) > 0 { + for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1553,36 +1842,43 @@ func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { } func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Device) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i += copy(dAtA[i:], m.Device) - } - if m.HcaHandles != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } if m.HcaObjects != 0 { - dAtA[i] = 0x18 
- i++ i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) + i-- + dAtA[i] = 0x18 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if m.HcaHandles != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) + i-- + dAtA[i] = 0x10 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkStat) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1590,70 +1886,131 @@ func (m *NetworkStat) Marshal() (dAtA []byte, err error) { } func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if m.RxBytes != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) + if m.TxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) + i-- + dAtA[i] = 0x48 } - if m.RxPackets != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) + if m.TxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) + i-- + dAtA[i] = 0x40 } - if m.RxErrors != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) + if m.TxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) + i-- + dAtA[i] = 0x38 + } + if m.TxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) + i-- + dAtA[i] = 0x30 } if m.RxDropped != 0 { - dAtA[i] = 0x28 - i++ i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) + i-- + dAtA[i] = 0x28 } - if m.TxBytes != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) + if m.RxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) + i-- + dAtA[i] = 0x20 } - if m.TxPackets != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) + if m.RxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) + i-- + dAtA[i] = 0x18 } - if m.TxErrors != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) + if m.RxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) + i-- + dAtA[i] = 0x10 } - if m.TxDropped != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CgroupStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NrIoWait != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait)) + i-- + dAtA[i] = 0x28 + } + if m.NrUninterruptible != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) + i-- + dAtA[i] = 0x20 + } + if m.NrStopped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) + i-- + dAtA[i] = 0x18 + } + if m.NrRunning != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) + i-- + dAtA[i] = 0x10 + } + if m.NrSleeping != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Metrics) Size() (n int) { if m == nil { @@ -1693,6 +2050,14 @@ func (m *Metrics) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + if m.CgroupStats != nil { + l = m.CgroupStats.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.MemoryOomControl != nil { + l = m.MemoryOomControl.Size() + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1959,6 +2324,27 @@ func (m *MemoryEntry) Size() (n int) { return n } +func (m *MemoryOomControl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OomKillDisable != 0 { + n += 1 + sovMetrics(uint64(m.OomKillDisable)) + } + if m.UnderOom != 0 { + n += 1 + sovMetrics(uint64(m.UnderOom)) + } + if m.OomKill != 0 { + n += 1 + sovMetrics(uint64(m.OomKill)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *BlkIOStat) Size() (n int) { if m == nil { return 0 @@ -2134,16 +2520,36 @@ func (m *NetworkStat) Size() (n int) { return n } -func sovMetrics(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } +func (m *CgroupStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NrSleeping != 0 { + n += 1 + sovMetrics(uint64(m.NrSleeping)) + } + if m.NrRunning != 0 { + n += 1 + sovMetrics(uint64(m.NrRunning)) + } + if m.NrStopped != 0 { + n += 1 + sovMetrics(uint64(m.NrStopped)) + } + if m.NrUninterruptible != 0 { + n += 1 + sovMetrics(uint64(m.NrUninterruptible)) + } + if m.NrIoWait != 0 { + n += 1 + sovMetrics(uint64(m.NrIoWait)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) } return n } + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozMetrics(x uint64) (n int) { return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } @@ -2151,14 +2557,26 @@ func (this *Metrics) String() string { if this == nil { return "nil" } + repeatedStringForHugetlb := "[]*HugetlbStat{" + for _, f := range this.Hugetlb { + repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + "," + } + repeatedStringForHugetlb += "}" + repeatedStringForNetwork := "[]*NetworkStat{" + for _, f := range this.Network { + repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + "," + } + repeatedStringForNetwork += "}" s := strings.Join([]string{`&Metrics{`, - `Hugetlb:` + strings.Replace(fmt.Sprintf("%v", this.Hugetlb), "HugetlbStat", "HugetlbStat", 1) + `,`, - `Pids:` + strings.Replace(fmt.Sprintf("%v", this.Pids), "PidsStat", "PidsStat", 1) + `,`, - `CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), 
"CPUStat", "CPUStat", 1) + `,`, - `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`, - `Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`, - `Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`, - `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "NetworkStat", "NetworkStat", 1) + `,`, + `Hugetlb:` + repeatedStringForHugetlb + `,`, + `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, + `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, + `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, + `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`, + `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, + `Network:` + repeatedStringForNetwork + `,`, + `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`, + `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", "MemoryOomControl", 1) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -2195,8 +2613,8 @@ func (this *CPUStat) String() string { return "nil" } s := strings.Join([]string{`&CPUStat{`, - `Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "CPUUsage", "CPUUsage", 1) + `,`, - `Throttling:` + strings.Replace(fmt.Sprintf("%v", this.Throttling), "Throttle", "Throttle", 1) + `,`, + `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`, + `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -2266,10 +2684,10 @@ func (this *MemoryStat) String() string { `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, - `Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Swap:` + strings.Replace(fmt.Sprintf("%v", this.Swap), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Kernel:` + strings.Replace(fmt.Sprintf("%v", this.Kernel), "MemoryEntry", "MemoryEntry", 1) + `,`, - `KernelTCP:` + strings.Replace(fmt.Sprintf("%v", this.KernelTCP), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Usage:` + strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -2289,19 +2707,72 @@ func (this *MemoryEntry) String() string { }, "") return s } +func (this *MemoryOomControl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryOomControl{`, + `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`, + `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`, + `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} func (this *BlkIOStat) String() string { if this == nil { return "nil" } + 
repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceBytesRecursive { + repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceBytesRecursive += "}" + repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServicedRecursive { + repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServicedRecursive += "}" + repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoQueuedRecursive { + repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoQueuedRecursive += "}" + repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceTimeRecursive { + repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceTimeRecursive += "}" + repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoWaitTimeRecursive { + repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoWaitTimeRecursive += "}" + repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoMergedRecursive { + repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoMergedRecursive += "}" + repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoTimeRecursive { + repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoTimeRecursive += "}" + repeatedStringForSectorsRecursive := "[]*BlkIOEntry{" + for _, f := range this.SectorsRecursive { + repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForSectorsRecursive += "}" s := strings.Join([]string{`&BlkIOStat{`, - `IoServiceBytesRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceBytesRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoServicedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServicedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoQueuedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoQueuedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoServiceTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoWaitTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoWaitTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoMergedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoMergedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `SectorsRecursive:` + strings.Replace(fmt.Sprintf("%v", this.SectorsRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, + `IoServiceBytesRecursive:` + repeatedStringForIoServiceBytesRecursive + `,`, + `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + `,`, + `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`, + `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`, + `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`, + `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + 
`,`, + `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`, + `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -2326,9 +2797,19 @@ func (this *RdmaStat) String() string { if this == nil { return "nil" } + repeatedStringForCurrent := "[]*RdmaEntry{" + for _, f := range this.Current { + repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForCurrent += "}" + repeatedStringForLimit := "[]*RdmaEntry{" + for _, f := range this.Limit { + repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForLimit += "}" s := strings.Join([]string{`&RdmaStat{`, - `Current:` + strings.Replace(fmt.Sprintf("%v", this.Current), "RdmaEntry", "RdmaEntry", 1) + `,`, - `Limit:` + strings.Replace(fmt.Sprintf("%v", this.Limit), "RdmaEntry", "RdmaEntry", 1) + `,`, + `Current:` + repeatedStringForCurrent + `,`, + `Limit:` + repeatedStringForLimit + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -2366,6 +2847,21 @@ func (this *NetworkStat) String() string { }, "") return s } +func (this *CgroupStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupStats{`, + `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, + `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`, + `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, + `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, + `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} func valueToStringMetrics(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2651,16 +3147,85 @@ func (m *Metrics) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CgroupStats == nil { + m.CgroupStats = &CgroupStats{} + } + if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemoryOomControl == nil { + m.MemoryOomControl = &MemoryOomControl{} + } + if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) if err != nil { return 
err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -2800,10 +3365,7 @@ func (m *HugetlbStat) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -2892,10 +3454,7 @@ func (m *PidsStat) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -3018,10 +3577,7 @@ func (m *CPUStat) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -3205,10 +3761,7 @@ func (m *CPUUsage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -3316,10 +3869,7 @@ func (m *Throttle) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -4043,12 +4593,173 @@ func (m *MemoryStat) Unmarshal(dAtA []byte) error { if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kernel == nil { + m.Kernel = &MemoryEntry{} + } + if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KernelTCP == nil { + m.KernelTCP = &MemoryEntry{} + } + if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) } - var msglen int + m.Max = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetrics @@ -4058,33 +4769,16 @@ func (m *MemoryStat) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Max |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kernel == nil { - m.Kernel = &MemoryEntry{} - } - if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) } - var msglen int + m.Failcnt = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetrics @@ -4094,38 +4788,18 @@ func (m *MemoryStat) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Failcnt |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KernelTCP == nil { - m.KernelTCP = &MemoryEntry{} - } - if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -4141,7 +4815,7 @@ func (m *MemoryStat) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemoryEntry) Unmarshal(dAtA []byte) error { +func (m *MemoryOomControl) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4164,17 +4838,17 @@ func (m *MemoryEntry) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") + return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType) } - m.Limit = 0 + m.OomKillDisable = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetrics @@ -4184,16 +4858,16 @@ func (m *MemoryEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= uint64(b&0x7F) << shift + m.OomKillDisable |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType) } - m.Usage = 0 + m.UnderOom = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetrics @@ -4203,35 +4877,16 @@ func (m *MemoryEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Usage |= uint64(b&0x7F) << shift + m.UnderOom |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) } - m.Failcnt = 0 + m.OomKill = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetrics @@ -4241,7 +4896,7 @@ func (m *MemoryEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Failcnt |= uint64(b&0x7F) << shift + m.OomKill |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4252,10 +4907,7 @@ func (m *MemoryEntry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -4578,10 +5230,7 @@ func (m *BlkIOStat) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -4753,10 +5402,7 @@ func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { 
return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -4875,10 +5521,7 @@ func (m *RdmaStat) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -4999,10 +5642,7 @@ func (m *RdmaEntry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -5237,10 +5877,153 @@ func (m *NetworkStat) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) + } + m.NrSleeping = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrSleeping |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) + } + m.NrRunning = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrRunning |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) + } + m.NrStopped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrStopped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) + } + m.NrUninterruptible = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrUninterruptible |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) + } + m.NrIoWait = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrIoWait |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthMetrics } if (iNdEx + skippy) > l { @@ -5259,6 +6042,7 @@ func (m *NetworkStat) Unmarshal(dAtA []byte) error { func skipMetrics(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -5290,10 +6074,8 @@ func skipMetrics(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -5314,55 +6096,30 @@ func skipMetrics(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthMetrics } iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - return iNdEx, nil case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipMetrics(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") ) diff --git a/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt index 7a960c67..e476cea6 100644 --- a/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt +++ b/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt @@ -63,6 +63,22 @@ file { type_name: ".io.containerd.cgroups.v1.NetworkStat" json_name: "network" } + field { + name: "cgroup_stats" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CgroupStats" + json_name: "cgroupStats" + } + field { + name: "memory_oom_control" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryOomControl" + json_name: "memoryOomControl" + } } message_type { name: "HugetlbStat" @@ -494,6 +510,30 @@ file { json_name: "failcnt" } } + message_type { + name: "MemoryOomControl" + field { + name: "oom_kill_disable" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKillDisable" + } + field { + name: "under_oom" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "underOom" + } + 
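
The rewritten skipMetrics above drops the recursive walk over group-encoded fields in favor of a single loop with a depth counter, returning the new ErrUnexpectedEndOfGroupMetrics when an end-group marker has no matching start. A minimal standalone sketch of the same technique follows; the helper names are illustrative, not taken from the vendored file, but the varint loop is the same one the generated Unmarshal methods inline throughout this hunk.

    package main

    import (
    	"errors"
    	"fmt"
    )

    var (
    	errOverflow   = errors.New("varint overflows 64 bits")
    	errTruncated  = errors.New("unexpected end of input")
    	errEndOfGroup = errors.New("unexpected end-of-group marker")
    )

    // uvarint decodes a base-128 varint starting at index i.
    func uvarint(b []byte, i int) (uint64, int, error) {
    	var v uint64
    	for shift := uint(0); ; shift += 7 {
    		if shift >= 64 {
    			return 0, 0, errOverflow
    		}
    		if i >= len(b) {
    			return 0, 0, errTruncated
    		}
    		c := b[i]
    		i++
    		v |= uint64(c&0x7F) << shift
    		if c < 0x80 {
    			return v, i, nil
    		}
    	}
    }

    // skipField consumes exactly one field and returns the index past it.
    // Groups (wire types 3/4) adjust a depth counter instead of recursing,
    // so hostile deeply nested input cannot exhaust the stack; depth must
    // return to zero before the skip is considered complete.
    func skipField(b []byte, i int) (int, error) {
    	depth := 0
    	for i < len(b) {
    		wire, next, err := uvarint(b, i)
    		if err != nil {
    			return 0, err
    		}
    		i = next
    		switch wire & 0x7 {
    		case 0: // varint
    			if _, i, err = uvarint(b, i); err != nil {
    				return 0, err
    			}
    		case 1: // fixed64
    			i += 8
    		case 2: // length-delimited
    			n, ni, err := uvarint(b, i)
    			if err != nil {
    				return 0, err
    			}
    			i = ni + int(n)
    		case 3: // start group
    			depth++
    		case 4: // end group
    			if depth == 0 {
    				return 0, errEndOfGroup
    			}
    			depth--
    		case 5: // fixed32
    			i += 4
    		default:
    			return 0, fmt.Errorf("illegal wire type %d", wire&0x7)
    		}
    		if i < 0 || i > len(b) {
    			return 0, errTruncated
    		}
    		if depth == 0 {
    			return i, nil
    		}
    	}
    	return 0, errTruncated
    }

    func main() {
    	// Field 1, varint wire type (tag 0x08), value 150 (0x96 0x01).
    	end, err := skipField([]byte{0x08, 0x96, 0x01}, 0)
    	fmt.Println(end, err) // 3 <nil>
    }
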
field { + name: "oom_kill" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKill" + } + } message_type { name: "BlkIOStat" field { @@ -708,5 +748,43 @@ file { json_name: "txDropped" } } + message_type { + name: "CgroupStats" + field { + name: "nr_sleeping" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrSleeping" + } + field { + name: "nr_running" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrRunning" + } + field { + name: "nr_stopped" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrStopped" + } + field { + name: "nr_uninterruptible" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrUninterruptible" + } + field { + name: "nr_io_wait" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrIoWait" + } + } syntax: "proto3" } diff --git a/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto index 62b51980..b3f6cc37 100644 --- a/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto +++ b/registry-library/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto @@ -12,6 +12,8 @@ message Metrics { BlkIOStat blkio = 5; RdmaStat rdma = 6; repeated NetworkStat network = 7; + CgroupStats cgroup_stats = 8; + MemoryOomControl memory_oom_control = 9; } message HugetlbStat { @@ -93,6 +95,12 @@ message MemoryEntry { uint64 failcnt = 4; } +message MemoryOomControl { + uint64 oom_kill_disable = 1; + uint64 under_oom = 2; + uint64 oom_kill = 3; +} + message BlkIOStat { repeated BlkIOEntry io_service_bytes_recursive = 1; repeated BlkIOEntry io_serviced_recursive = 2; @@ -134,3 +142,17 @@ message NetworkStat { uint64 tx_errors = 8; uint64 tx_dropped = 9; } + +// CgroupStats exports per-cgroup statistics. +message CgroupStats { + // number of tasks sleeping + uint64 nr_sleeping = 1; + // number of tasks running + uint64 nr_running = 2; + // number of tasks in stopped state + uint64 nr_stopped = 3; + // number of tasks in uninterruptible state + uint64 nr_uninterruptible = 4; + // number of tasks waiting on IO + uint64 nr_io_wait = 5; +} diff --git a/registry-library/vendor/github.com/containerd/containerd/archive/compression/compression.go b/registry-library/vendor/github.com/containerd/containerd/archive/compression/compression.go index 2338de6b..a883e4d5 100644 --- a/registry-library/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/registry-library/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -29,6 +29,7 @@ import ( "sync" "github.com/containerd/containerd/log" + "github.com/klauspost/compress/zstd" ) type ( @@ -41,6 +42,8 @@ const ( Uncompressed Compression = iota // Gzip is gzip compression algorithm. Gzip + // Zstd is zstd compression algorithm. 
+ Zstd ) const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" @@ -126,6 +129,7 @@ func (r *bufferedReader) Peek(n int) ([]byte, error) { func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Gzip: {0x1F, 0x8B, 0x08}, + Zstd: {0x28, 0xb5, 0x2f, 0xfd}, } { if len(source) < len(m) { // Len too short @@ -174,6 +178,19 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { return gzReader.Close() }, }, nil + case Zstd: + zstdReader, err := zstd.NewReader(buf) + if err != nil { + return nil, err + } + return &readCloserWrapper{ + Reader: zstdReader, + compression: compression, + closer: func() error { + zstdReader.Close() + return nil + }, + }, nil default: return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) @@ -187,6 +204,8 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er return &writeCloserWrapper{dest, nil}, nil case Gzip: return gzip.NewWriter(dest), nil + case Zstd: + return zstd.NewWriter(dest) default: return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) } @@ -197,6 +216,8 @@ func (compression *Compression) Extension() string { switch *compression { case Gzip: return "gz" + case Zstd: + return "zst" } return "" } diff --git a/registry-library/vendor/github.com/containerd/containerd/content/adaptor.go b/registry-library/vendor/github.com/containerd/containerd/content/adaptor.go new file mode 100644 index 00000000..88bad261 --- /dev/null +++ b/registry-library/vendor/github.com/containerd/containerd/content/adaptor.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "strings" + + "github.com/containerd/containerd/filters" +) + +// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. +func AdaptInfo(info Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "digest": + return info.Digest.String(), true + case "size": + // TODO: support size based filtering + case "labels": + return checkMap(fieldpath[1:], info.Labels) + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, ".")] + return value, ok +} diff --git a/registry-library/vendor/github.com/containerd/containerd/content/content.go b/registry-library/vendor/github.com/containerd/containerd/content/content.go index d8141a68..ff17a841 100644 --- a/registry-library/vendor/github.com/containerd/containerd/content/content.go +++ b/registry-library/vendor/github.com/containerd/containerd/content/content.go @@ -37,7 +37,7 @@ type Provider interface { // ReaderAt only requires desc.Digest to be set. // Other fields in the descriptor may be used internally for resolving // the location of the actual data. 
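
The compression hunk above registers the zstd frame magic (0x28 0xB5 0x2F 0xFD) next to gzip's 0x1F 0x8B 0x08 and wires zstd.NewReader and zstd.NewWriter into the stream helpers. A minimal, dependency-free sketch of the same prefix-based detection; the helper name is illustrative:

    package main

    import (
    	"bytes"
    	"fmt"
    )

    // magics mirrors the map DetectCompression iterates over in this hunk.
    var magics = map[string][]byte{
    	"gzip": {0x1F, 0x8B, 0x08},
    	"zstd": {0x28, 0xB5, 0x2F, 0xFD},
    }

    // detect returns the compression name whose magic prefixes the header,
    // or "uncompressed" when nothing matches (too-short input included).
    func detect(header []byte) string {
    	for name, magic := range magics {
    		if bytes.HasPrefix(header, magic) {
    			return name
    		}
    	}
    	return "uncompressed"
    }

    func main() {
    	fmt.Println(detect([]byte{0x28, 0xB5, 0x2F, 0xFD, 0x00})) // zstd
    	fmt.Println(detect([]byte("plain text")))                 // uncompressed
    }
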
- ReaderAt(ctx context.Context, dec ocispec.Descriptor) (ReaderAt, error) + ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) } // Ingester writes content diff --git a/registry-library/vendor/github.com/containerd/containerd/content/local/locks.go b/registry-library/vendor/github.com/containerd/containerd/content/local/locks.go index bc3bd18e..d1d2d564 100644 --- a/registry-library/vendor/github.com/containerd/containerd/content/local/locks.go +++ b/registry-library/vendor/github.com/containerd/containerd/content/local/locks.go @@ -18,6 +18,7 @@ package local import ( "sync" + "time" "github.com/containerd/containerd/errdefs" "github.com/pkg/errors" @@ -25,9 +26,13 @@ import ( // Handles locking references +type lock struct { + since time.Time +} + var ( // locks lets us lock in process - locks = map[string]struct{}{} + locks = make(map[string]*lock) locksMu sync.Mutex ) @@ -35,11 +40,11 @@ func tryLock(ref string) error { locksMu.Lock() defer locksMu.Unlock() - if _, ok := locks[ref]; ok { - return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", ref) + if v, ok := locks[ref]; ok { + return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since) } - locks[ref] = struct{}{} + locks[ref] = &lock{time.Now()} return nil } diff --git a/registry-library/vendor/github.com/containerd/containerd/content/local/readerat.go b/registry-library/vendor/github.com/containerd/containerd/content/local/readerat.go index 42b99dc4..5d3ae039 100644 --- a/registry-library/vendor/github.com/containerd/containerd/content/local/readerat.go +++ b/registry-library/vendor/github.com/containerd/containerd/content/local/readerat.go @@ -18,6 +18,11 @@ package local import ( "os" + + "github.com/pkg/errors" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" ) // readerat implements io.ReaderAt in a completely stateless manner by opening @@ -27,6 +32,29 @@ type sizeReaderAt struct { fp *os.File } +// OpenReader creates ReaderAt from a file +func OpenReader(p string) (content.ReaderAt, error) { + fi, err := os.Stat(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") + } + + fp, err := os.Open(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") + } + + return sizeReaderAt{size: fi.Size(), fp: fp}, nil +} + func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { return ra.fp.ReadAt(p, offset) } diff --git a/registry-library/vendor/github.com/containerd/containerd/content/local/store.go b/registry-library/vendor/github.com/containerd/containerd/content/local/store.go index 87056814..cb6aae82 100644 --- a/registry-library/vendor/github.com/containerd/containerd/content/local/store.go +++ b/registry-library/vendor/github.com/containerd/containerd/content/local/store.go @@ -131,25 +131,13 @@ func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content. 
if err != nil { return nil, errors.Wrapf(err, "calculating blob path for ReaderAt") } - fi, err := os.Stat(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p) - } - - fp, err := os.Open(p) + reader, err := OpenReader(p) if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p) + return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p) } - return sizeReaderAt{size: fi.Size(), fp: fp}, nil + return reader, nil } // Delete removes a blob by its digest. @@ -240,9 +228,14 @@ func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...str return info, nil } -func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { - // TODO: Support filters +func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { root := filepath.Join(s.root, "blobs") + + filter, err := filters.ParseAll(fs...) + if err != nil { + return err + } + var alg digest.Algorithm return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { if err != nil { @@ -286,7 +279,12 @@ func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string return err } } - return fn(s.info(dgst, fi, labels)) + + info := s.info(dgst, fi, labels) + if !filter.Match(content.AdaptInfo(info)) { + return nil + } + return fn(info) }) } @@ -467,7 +465,6 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content. } var lockErr error for count := uint64(0); count < 10; count++ { - time.Sleep(time.Millisecond * time.Duration(rand.Intn(1< 5 { + for armVersion--; armVersion >= 5; armVersion-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(armVersion), + }) } } - if platform.Variant == "v6" { - return orderedPlatformComparer{ - matchers: []Matcher{ - &matcher{ - Platform: platform, - }, - &matcher{ - Platform: specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v5", - }, - }, - }, - } + case "arm64": + variant := platform.Variant + if variant == "" { + variant = "v8" } + vector = append(vector, platformVector(specs.Platform{ + Architecture: "arm", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: variant, + })...) } - return singlePlatformComparer{ - Matcher: &matcher{ - Platform: platform, - }, - } + return vector +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. +// +// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 +// For arm/v7, will also match arm/v6 and arm/v5 +// For arm/v6, will also match arm/v5 +// For amd64, will also match 386 +func Only(platform specs.Platform) MatchComparer { + return Ordered(platformVector(Normalize(platform))...) +} + +// OnlyStrict returns a match comparer for a single platform. +// +// Unlike Only, OnlyStrict does not match sub platforms. +// So, "arm/vN" will not match "arm/vM" where M < N, +// and "amd64" will not also match "386". +// +// OnlyStrict matches non-canonical forms. +// So, "arm64" matches "arm/64/v8". 
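
Per the doc comments just above, Only normalizes a platform and then matches down the ARM variant vector, while the new OnlyStrict stops at the exact normalized platform. A short usage sketch against the vendored package, assuming only the Parse, Only, OnlyStrict, and Match signatures shown in this hunk:

    package main

    import (
    	"fmt"

    	"github.com/containerd/containerd/platforms"
    )

    func main() {
    	p, err := platforms.Parse("linux/arm/v7")
    	if err != nil {
    		panic(err)
    	}
    	only := platforms.Only(p)         // also matches the v6 and v5 fallbacks
    	strict := platforms.OnlyStrict(p) // matches v7 and nothing below it

    	v6, err := platforms.Parse("linux/arm/v6")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(only.Match(v6))   // true
    	fmt.Println(strict.Match(v6)) // false
    }
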
+func OnlyStrict(platform specs.Platform) MatchComparer { + return Ordered(Normalize(platform)) } // Ordered returns a platform MatchComparer which matches any of the platforms @@ -153,14 +125,6 @@ func Any(platforms ...specs.Platform) MatchComparer { // with preference for ordering. var All MatchComparer = allPlatformComparer{} -type singlePlatformComparer struct { - Matcher -} - -func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool { - return c.Match(p1) && !c.Match(p2) -} - type orderedPlatformComparer struct { matchers []Matcher } diff --git a/registry-library/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/registry-library/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index db65a726..4a7177e3 100644 --- a/registry-library/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/registry-library/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -21,6 +21,7 @@ import ( "os" "runtime" "strings" + "sync" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" @@ -28,14 +29,18 @@ import ( ) // Present the ARM instruction set architecture, eg: v7, v8 -var cpuVariant string +// Don't use this value directly; call cpuVariant() instead. +var cpuVariantValue string -func init() { - if isArmArch(runtime.GOARCH) { - cpuVariant = getCPUVariant() - } else { - cpuVariant = "" - } +var cpuVariantOnce sync.Once + +func cpuVariant() string { + cpuVariantOnce.Do(func() { + if isArmArch(runtime.GOARCH) { + cpuVariantValue = getCPUVariant() + } + }) + return cpuVariantValue } // For Linux, the kernel has already detected the ABI, ISA and Features. @@ -96,14 +101,18 @@ func getCPUVariant() string { return "" } + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + switch strings.ToLower(variant) { case "8", "aarch64": - // special case: if running a 32-bit userspace on aarch64, the variant should be "v7" - if runtime.GOARCH == "arm" { - variant = "v7" - } else { - variant = "v8" - } + variant = "v8" case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": variant = "v7" case "6", "6tej": diff --git a/registry-library/vendor/github.com/containerd/containerd/platforms/defaults.go b/registry-library/vendor/github.com/containerd/containerd/platforms/defaults.go index a14d80e5..cb77fbc9 100644 --- a/registry-library/vendor/github.com/containerd/containerd/platforms/defaults.go +++ b/registry-library/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -33,6 +33,11 @@ func DefaultSpec() specs.Platform { OS: runtime.GOOS, Architecture: runtime.GOARCH, // The Variant field will be empty if arch != ARM. - Variant: cpuVariant, + Variant: cpuVariant(), } } + +// DefaultStrict returns strict form of Default. 
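
The cpuinfo.go hunk above swaps an init-time probe of the ARM variant for a sync.Once guarded accessor, so /proc/cpuinfo is only read when a caller first needs the value. The same memoization pattern in isolation; probeVariant is a hypothetical stand-in for the real getCPUVariant:

    package main

    import (
    	"fmt"
    	"runtime"
    	"sync"
    )

    var (
    	cpuVariantValue string
    	cpuVariantOnce  sync.Once
    )

    // cpuVariant runs the probe at most once, on first use, instead of in
    // init(); concurrent callers block until the Do body completes.
    func cpuVariant() string {
    	cpuVariantOnce.Do(func() {
    		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
    			cpuVariantValue = probeVariant()
    		}
    	})
    	return cpuVariantValue
    }

    // probeVariant is a placeholder for the real /proc/cpuinfo probe.
    func probeVariant() string { return "v8" }

    func main() { fmt.Println("variant:", cpuVariant()) }
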
+func DefaultStrict() MatchComparer { + return OnlyStrict(DefaultSpec()) +} diff --git a/registry-library/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/registry-library/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index 0defbd36..0c380e3b 100644 --- a/registry-library/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/registry-library/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -19,13 +19,63 @@ package platforms import ( + "fmt" + "runtime" + "strconv" + "strings" + + imagespec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" ) -// Default returns the default matcher for the platform. +type matchComparer struct { + defaults Matcher + osVersionPrefix string +} + +// Match matches platform with the same windows major, minor +// and build version. +func (m matchComparer) Match(p imagespec.Platform) bool { + if m.defaults.Match(p) { + // TODO(windows): Figure out whether OSVersion is deprecated. + return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) + } + return false +} + +// Less sorts matched platforms in front of other platforms. +// For matched platforms, it puts platforms with larger revision +// number in front. +func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { + m1, m2 := m.Match(p1), m.Match(p2) + if m1 && m2 { + r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) + return r1 > r2 + } + return m1 && !m2 +} + +func revision(v string) int { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return 0 + } + r, err := strconv.Atoi(parts[3]) + if err != nil { + return 0 + } + return r +} + +// Default returns the current platform's default platform specification. func Default() MatchComparer { - return Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: "amd64", - }) + major, minor, build := windows.RtlGetNtVersionNumbers() + return matchComparer{ + defaults: Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + }), + osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), + } } diff --git a/registry-library/vendor/github.com/containerd/containerd/platforms/platforms.go b/registry-library/vendor/github.com/containerd/containerd/platforms/platforms.go index 77d3f184..088bdea0 100644 --- a/registry-library/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/registry-library/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -189,8 +189,8 @@ func Parse(specifier string) (specs.Platform, error) { if isKnownOS(p.OS) { // picks a default architecture p.Architecture = runtime.GOARCH - if p.Architecture == "arm" && cpuVariant != "v7" { - p.Variant = cpuVariant + if p.Architecture == "arm" && cpuVariant() != "v7" { + p.Variant = cpuVariant() } return p, nil diff --git a/registry-library/vendor/github.com/containerd/containerd/reference/reference.go b/registry-library/vendor/github.com/containerd/containerd/reference/reference.go index 562ab0d4..a4bf6da6 100644 --- a/registry-library/vendor/github.com/containerd/containerd/reference/reference.go +++ b/registry-library/vendor/github.com/containerd/containerd/reference/reference.go @@ -85,6 +85,10 @@ var splitRe = regexp.MustCompile(`[:@]`) // Parse parses the string into a structured ref. 
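
On Windows, the new matchComparer prefers images whose OSVersion shares the host's major.minor.build prefix and, among those, the highest fourth (revision) field. The revision helper from this hunk, reproduced standalone with example inputs; malformed or short versions deliberately collapse to 0 so the ordering stays total:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // revision reads the fourth dotted field of a Windows OSVersion string
    // such as "10.0.17763.1817", returning 0 when it is absent or not a number.
    func revision(v string) int {
    	parts := strings.Split(v, ".")
    	if len(parts) < 4 {
    		return 0
    	}
    	r, err := strconv.Atoi(parts[3])
    	if err != nil {
    		return 0
    	}
    	return r
    }

    func main() {
    	fmt.Println(revision("10.0.17763.1817")) // 1817
    	fmt.Println(revision("10.0.17763"))      // 0
    }
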
func Parse(s string) (Spec, error) { + if strings.Contains(s, "://") { + return Spec{}, ErrInvalid + } + u, err := url.Parse("dummy://" + s) if err != nil { return Spec{}, err diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go new file mode 100644 index 00000000..0d01c81a --- /dev/null +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package auth + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + "strings" + "time" + + "github.com/containerd/containerd/log" + remoteserrors "github.com/containerd/containerd/remotes/errors" + "github.com/pkg/errors" + "golang.org/x/net/context/ctxhttp" +) + +var ( + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +// GenerateTokenOptions generates options for fetching a token based on a challenge +func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) { + realm, ok := c.Parameters["realm"] + if !ok { + return TokenOptions{}, errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") + } + + to := TokenOptions{ + Realm: realmURL.String(), + Service: c.Parameters["service"], + Username: username, + Secret: secret, + } + + scope, ok := c.Parameters["scope"] + if ok { + to.Scopes = append(to.Scopes, scope) + } else { + log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") + } + + return to, nil +} + +// TokenOptions are options for requesting a token +type TokenOptions struct { + Realm string + Service string + Scopes []string + Username string + Secret string +} + +// OAuthTokenResponse is response from fetching token with a OAuth POST request +type OAuthTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +// FetchTokenWithOAuth fetches a token using a POST request +func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) { + form := url.Values{} + if len(to.Scopes) > 0 { + form.Set("scope", strings.Join(to.Scopes, " ")) + } + form.Set("service", to.Service) + form.Set("client_id", clientID) + + if to.Username == "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", to.Secret) + } else { + form.Set("grant_type", "password") + form.Set("username", to.Username) + form.Set("password", to.Secret) + } + + req, err 
:= http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) + } + + resp, err := ctxhttp.Do(ctx, client, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) + } + + decoder := json.NewDecoder(resp.Body) + + var tr OAuthTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, errors.Wrap(err, "unable to decode token response") + } + + if tr.AccessToken == "" { + return nil, errors.WithStack(ErrNoToken) + } + + return &tr, nil +} + +// FetchTokenResponse is response from fetching token with GET request +type FetchTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +// FetchToken fetches a token using a GET request +func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { + req, err := http.NewRequest("GET", to.Realm, nil) + if err != nil { + return nil, err + } + + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) + } + + reqParams := req.URL.Query() + + if to.Service != "" { + reqParams.Add("service", to.Service) + } + + for _, scope := range to.Scopes { + reqParams.Add("scope", scope) + } + + if to.Secret != "" { + req.SetBasicAuth(to.Username, to.Secret) + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := ctxhttp.Do(ctx, client, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) + } + + decoder := json.NewDecoder(resp.Body) + + var tr FetchTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, errors.Wrap(err, "unable to decode token response") + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return nil, errors.WithStack(ErrNoToken) + } + + return &tr, nil +} diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go similarity index 81% rename from registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth.go rename to registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go index 70cfdea4..223fa2d0 100644 --- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth.go +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go @@ -14,7 +14,7 @@ limitations under the License. 
*/ -package docker +package auth import ( "net/http" @@ -22,31 +22,35 @@ import ( "strings" ) -type authenticationScheme byte +// AuthenticationScheme defines scheme of the authentication method +type AuthenticationScheme byte const ( - basicAuth authenticationScheme = 1 << iota // Defined in RFC 7617 - digestAuth // Defined in RFC 7616 - bearerAuth // Defined in RFC 6750 + // BasicAuth is scheme for Basic HTTP Authentication RFC 7617 + BasicAuth AuthenticationScheme = 1 << iota + // DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616 + DigestAuth + // BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750 + BearerAuth ) -// challenge carries information from a WWW-Authenticate response header. +// Challenge carries information from a WWW-Authenticate response header. // See RFC 2617. -type challenge struct { +type Challenge struct { // scheme is the auth-scheme according to RFC 2617 - scheme authenticationScheme + Scheme AuthenticationScheme // parameters are the auth-params according to RFC 2617 - parameters map[string]string + Parameters map[string]string } -type byScheme []challenge +type byScheme []Challenge func (bs byScheme) Len() int { return len(bs) } func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] } // Sort in priority order: token > digest > basic -func (bs byScheme) Less(i, j int) bool { return bs[i].scheme > bs[j].scheme } +func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme } // Octet types from RFC 2616. type octetType byte @@ -90,22 +94,23 @@ func init() { } } -func parseAuthHeader(header http.Header) []challenge { - challenges := []challenge{} +// ParseAuthHeader parses challenges from WWW-Authenticate header +func ParseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) - var s authenticationScheme + var s AuthenticationScheme switch v { case "basic": - s = basicAuth + s = BasicAuth case "digest": - s = digestAuth + s = DigestAuth case "bearer": - s = bearerAuth + s = BearerAuth default: continue } - challenges = append(challenges, challenge{scheme: s, parameters: p}) + challenges = append(challenges, Challenge{Scheme: s, Parameters: p}) } sort.Stable(byScheme(challenges)) return challenges diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index 001423a0..67e4aea8 100644 --- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -19,21 +19,17 @@ package docker import ( "context" "encoding/base64" - "encoding/json" "fmt" - "io" - "io/ioutil" "net/http" - "net/url" "strings" "sync" - "time" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes/docker/auth" + remoteerrors "github.com/containerd/containerd/remotes/errors" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/net/context/ctxhttp" ) type dockerAuthorizer struct { @@ -135,8 +131,8 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R a.mu.Lock() defer a.mu.Unlock() - for _, c := range parseAuthHeader(last.Header) { - if c.scheme == bearerAuth { + for _, c := range auth.ParseAuthHeader(last.Header) { + if c.Scheme == auth.BearerAuth { if err := 
invalidAuthorization(c, responses); err != nil { delete(a.handlers, host) return err @@ -152,26 +148,35 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R return nil } - common, err := a.generateTokenOptions(ctx, host, c) + var username, secret string + if a.credentials != nil { + var err error + username, secret, err = a.credentials(host) + if err != nil { + return err + } + } + + common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c) if err != nil { return err } - a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) return nil - } else if c.scheme == basicAuth && a.credentials != nil { + } else if c.Scheme == auth.BasicAuth && a.credentials != nil { username, secret, err := a.credentials(host) if err != nil { return err } if username != "" && secret != "" { - common := tokenOptions{ - username: username, - secret: secret, + common := auth.TokenOptions{ + Username: username, + Secret: secret, } - a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) return nil } } @@ -179,38 +184,6 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") } -func (a *dockerAuthorizer) generateTokenOptions(ctx context.Context, host string, c challenge) (tokenOptions, error) { - realm, ok := c.parameters["realm"] - if !ok { - return tokenOptions{}, errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return tokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") - } - - to := tokenOptions{ - realm: realmURL.String(), - service: c.parameters["service"], - } - - scope, ok := c.parameters["scope"] - if ok { - to.scopes = append(to.scopes, scope) - } else { - log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") - } - - if a.credentials != nil { - to.username, to.secret, err = a.credentials(host) - if err != nil { - return tokenOptions{}, err - } - } - return to, nil -} - // authResult is used to control limit rate. 
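
With the challenge parsing moved into the exported auth package, a client can reproduce the first half of AddResponses by hand: parse the WWW-Authenticate challenges, pick the bearer one, and turn it into TokenOptions. A sketch using only the ParseAuthHeader and GenerateTokenOptions signatures shown in these hunks; the registry host and header values are illustrative:

    package main

    import (
    	"context"
    	"fmt"
    	"net/http"

    	"github.com/containerd/containerd/remotes/docker/auth"
    )

    func main() {
    	// A header as a registry might return on an unauthenticated pull.
    	h := http.Header{}
    	h.Add("WWW-Authenticate",
    		`Bearer realm="https://auth.example.com/token",service="registry.example.com",scope="repository:library/busybox:pull"`)

    	for _, c := range auth.ParseAuthHeader(h) {
    		if c.Scheme != auth.BearerAuth {
    			continue
    		}
    		to, err := auth.GenerateTokenOptions(context.Background(),
    			"registry.example.com", "", "", c)
    		if err != nil {
    			panic(err)
    		}
    		// Realm, Service, and Scopes now feed auth.FetchToken or
    		// auth.FetchTokenWithOAuth, as the authorizer does below.
    		fmt.Printf("realm=%s service=%s scopes=%v\n", to.Realm, to.Service, to.Scopes)
    	}
    }
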
type authResult struct { sync.WaitGroup @@ -227,17 +200,17 @@ type authHandler struct { client *http.Client // only support basic and bearer schemes - scheme authenticationScheme + scheme auth.AuthenticationScheme // common contains common challenge answer - common tokenOptions + common auth.TokenOptions // scopedTokens caches token indexed by scopes, which used in // bearer auth case scopedTokens map[string]*authResult } -func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler { +func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler { return &authHandler{ header: hdr, client: client, @@ -249,17 +222,17 @@ func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationS func (ah *authHandler) authorize(ctx context.Context) (string, error) { switch ah.scheme { - case basicAuth: + case auth.BasicAuth: return ah.doBasicAuth(ctx) - case bearerAuth: + case auth.BearerAuth: return ah.doBearerAuth(ctx) default: - return "", errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") + return "", errors.Wrapf(errdefs.ErrNotImplemented, "failed to find supported auth scheme: %s", string(ah.scheme)) } } func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { - username, secret := ah.common.username, ah.common.secret + username, secret := ah.common.Username, ah.common.Secret if username == "" || secret == "" { return "", fmt.Errorf("failed to handle basic auth because missing username or secret") @@ -269,14 +242,14 @@ func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { return fmt.Sprintf("Basic %s", auth), nil } -func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { +func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err error) { // copy common tokenOptions to := ah.common - to.scopes = GetTokenScopes(ctx, to.scopes) + to.Scopes = GetTokenScopes(ctx, to.Scopes) // Docs: https://docs.docker.com/registry/spec/auth/scope - scoped := strings.Join(to.scopes, " ") + scoped := strings.Join(to.Scopes, " ") ah.Lock() if r, exist := ah.scopedTokens[scoped]; exist { @@ -291,174 +264,52 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { ah.scopedTokens[scoped] = r ah.Unlock() + defer func() { + token = fmt.Sprintf("Bearer %s", token) + r.token, r.err = token, err + r.Done() + }() + // fetch token for the resource scope - var ( - token string - err error - ) - if to.secret != "" { + if to.Secret != "" { + defer func() { + err = errors.Wrap(err, "failed to fetch oauth token") + }() // credential information is provided, use oauth POST endpoint - token, err = ah.fetchTokenWithOAuth(ctx, to) - err = errors.Wrap(err, "failed to fetch oauth token") - } else { - // do request anonymously - token, err = ah.fetchToken(ctx, to) - err = errors.Wrap(err, "failed to fetch anonymous token") - } - token = fmt.Sprintf("Bearer %s", token) - - r.token, r.err = token, err - r.Done() - return r.token, r.err -} - -type tokenOptions struct { - realm string - service string - scopes []string - username string - secret string -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { - 
form := url.Values{} - if len(to.scopes) > 0 { - form.Set("scope", strings.Join(to.scopes, " ")) - } - form.Set("service", to.service) - // TODO: Allow setting client_id - form.Set("client_id", "containerd-client") - - if to.username == "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", to.secret) - } else { - form.Set("grant_type", "password") - form.Set("username", to.username) - form.Set("password", to.secret) - } - - req, err := http.NewRequest("POST", to.realm, strings.NewReader(form.Encode())) - if err != nil { - return "", err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - if ah.header != nil { - for k, v := range ah.header { - req.Header[k] = append(req.Header[k], v...) - } - } - - resp, err := ctxhttp.Do(ctx, ah.client, req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - // Registries without support for POST may return 404 for POST /v2/token. - // As of September 2017, GCR is known to return 404. - // As of February 2018, JFrog Artifactory is known to return 401. - if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { - return ah.fetchToken(ctx, to) - } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { - b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB - log.G(ctx).WithFields(logrus.Fields{ - "status": resp.Status, - "body": string(b), - }).Debugf("token request failed") - // TODO: handle error body and write debug output - return "", errors.Errorf("unexpected status: %s", resp.Status) - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - return tr.AccessToken, nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -// fetchToken fetches a token using a GET request -func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, error) { - req, err := http.NewRequest("GET", to.realm, nil) - if err != nil { - return "", err - } - - if ah.header != nil { - for k, v := range ah.header { - req.Header[k] = append(req.Header[k], v...) + // TODO: Allow setting client_id + resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to) + if err != nil { + var errStatus remoteerrors.ErrUnexpectedStatus + if errors.As(err, &errStatus) { + // Registries without support for POST may return 404 for POST /v2/token. + // As of September 2017, GCR is known to return 404. + // As of February 2018, JFrog Artifactory is known to return 401. 
+ if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { + resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) + if err != nil { + return "", err + } + return resp.Token, nil + } + log.G(ctx).WithFields(logrus.Fields{ + "status": errStatus.Status, + "body": string(errStatus.Body), + }).Debugf("token request failed") + } + return "", err } + return resp.AccessToken, nil } - - reqParams := req.URL.Query() - - if to.service != "" { - reqParams.Add("service", to.service) - } - - for _, scope := range to.scopes { - reqParams.Add("scope", scope) - } - - if to.secret != "" { - req.SetBasicAuth(to.username, to.secret) - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := ctxhttp.Do(ctx, ah.client, req) + // do request anonymously + resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - // TODO: handle error body and write debug output - return "", errors.Errorf("unexpected status: %s", resp.Status) + return "", errors.Wrap(err, "failed to fetch anonymous token") } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", ErrNoToken - } - - return tr.Token, nil + return resp.Token, nil } -func invalidAuthorization(c challenge, responses []*http.Response) error { - errStr := c.parameters["error"] +func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { + errStr := c.Parameters["error"] if errStr == "" { return nil } diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/errcode.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/errcode.go index 7d1e54f2..8c623bcb 100644 --- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/errcode.go +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/errcode.go @@ -163,7 +163,7 @@ type ErrorDescriptor struct { // keyed value when serializing api errors. Value string - // Message is a short, human readable decription of the error condition + // Message is a short, human readable description of the error condition // included in API responses. 
Message string
diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
index cd0168be..9d6708de 100644
--- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
+++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
@@ -45,7 +45,7 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
 return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts")
 }
 
- ctx, err := contextWithRepositoryScope(ctx, r.refspec, false)
+ ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false)
 if err != nil {
 return nil, err
 }
diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
index 9175b6a7..704eba42 100644
--- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
+++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
@@ -121,7 +121,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
 
 rc, err := hrs.open(hrs.offset)
 if err != nil {
- return nil, errors.Wrapf(err, "httpReaderSeeker: failed open")
+ return nil, errors.Wrapf(err, "httpReadSeeker: failed open")
 }
 
 if hrs.rc != nil {
diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
index 98ea515d..8028d142 100644
--- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
+++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
@@ -30,6 +30,7 @@ import (
 "github.com/containerd/containerd/images"
 "github.com/containerd/containerd/log"
 "github.com/containerd/containerd/remotes"
+ remoteserrors "github.com/containerd/containerd/remotes/errors"
 digest "github.com/opencontainers/go-digest"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 "github.com/pkg/errors"
@@ -43,17 +44,47 @@ type dockerPusher struct {
 tracker StatusTracker
 }
 
+// Writer implements the Ingester API of the content store. This allows the client
+// to receive ErrUnavailable when there is already an on-going upload.
+// Note that the tracker MUST implement the StatusTrackLocker interface to avoid
+// race conditions on the StatusTracker.
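+//
+// Callers typically obtain this writer through content.OpenWriter rather
+// than calling Writer directly, as the remotes/handlers.go hunk later in
+// this patch does:
+//
+//	cw, err := content.OpenWriter(ctx, pusher.(content.Ingester),
+//		content.WithRef(ref), content.WithDescriptor(desc))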
+func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+ var wOpts content.WriterOpts
+ for _, opt := range opts {
+ if err := opt(&wOpts); err != nil {
+ return nil, err
+ }
+ }
+ if wOpts.Ref == "" {
+ return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
+ }
+ return p.push(ctx, wOpts.Desc, wOpts.Ref, true)
+}
+
 func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
- ctx, err := contextWithRepositoryScope(ctx, p.refspec, true)
+ return p.push(ctx, desc, remotes.MakeRefKey(ctx, desc), false)
+}
+
+func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref string, unavailableOnFail bool) (content.Writer, error) {
+ if l, ok := p.tracker.(StatusTrackLocker); ok {
+ l.Lock(ref)
+ defer l.Unlock(ref)
+ }
+ ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true)
 if err != nil {
 return nil, err
 }
- ref := remotes.MakeRefKey(ctx, desc)
 status, err := p.tracker.GetStatus(ref)
 if err == nil {
- if status.Offset == status.Total {
+ if status.Committed && status.Offset == status.Total {
 return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref)
 }
+ if unavailableOnFail {
+ // Another push of this ref is happening elsewhere. The rest of the function
+ // will continue only when `errdefs.IsNotFound(err) == true` (i.e. there
+ // is no actively-tracked ref already).
+ return nil, errors.Wrap(errdefs.ErrUnavailable, "push is on-going")
+ }
 // TODO: Handle incomplete status
 } else if !errdefs.IsNotFound(err) {
 return nil, errors.Wrap(err, "failed to get status")
@@ -104,16 +135,20 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 if exists {
 p.tracker.SetStatus(ref, Status{
+ Committed: true,
 Status: content.Status{
- Ref: ref,
+ Ref: ref,
+ Total: desc.Size,
+ Offset: desc.Size,
 // TODO: Set updated time?
 },
 })
 return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
 }
 } else if resp.StatusCode != http.StatusNotFound {
- // TODO: log error
- return nil, errors.Errorf("unexpected response: %s", resp.Status)
+ err := remoteserrors.NewUnexpectedStatusErr(resp)
+ log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
+ return nil, err
 }
 }
@@ -128,7 +163,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 var resp *http.Response
 if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
 preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
- pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo)
 
 // NOTE: the fromRepo might be private repo and
 // auth service still can grant token without error.
@@ -136,7 +171,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
 //
 // for the private repo, we should remove mount-from
 // query and send the request again.
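+ // (doWithRetries, which replaces the single-shot do below, replays the
+ // request after refreshing authorization on a 401 instead of failing
+ // outright, so the recovery described above can actually happen.)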
- resp, err = preq.do(pctx) + resp, err = preq.doWithRetries(pctx, nil) if err != nil { return nil, err } @@ -160,14 +195,18 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten case http.StatusOK, http.StatusAccepted, http.StatusNoContent: case http.StatusCreated: p.tracker.SetStatus(ref, Status{ + Committed: true, Status: content.Status{ - Ref: ref, + Ref: ref, + Total: desc.Size, + Offset: desc.Size, }, }) return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) default: - // TODO: log error - return nil, errors.Errorf("unexpected response: %s", resp.Status) + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + return nil, err } var ( @@ -219,7 +258,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten // TODO: Support chunked upload pr, pw := io.Pipe() - respC := make(chan *http.Response, 1) + respC := make(chan response, 1) body := ioutil.NopCloser(pr) req.body = func() (io.ReadCloser, error) { @@ -235,8 +274,9 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten go func() { defer close(respC) - resp, err := req.do(ctx) + resp, err := req.doWithRetries(ctx, nil) if err != nil { + respC <- response{err: err} pr.CloseWithError(err) return } @@ -244,10 +284,11 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten switch resp.StatusCode { case http.StatusOK, http.StatusCreated, http.StatusNoContent: default: - // TODO: log error - pr.CloseWithError(errors.Errorf("unexpected response: %s", resp.Status)) + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + pr.CloseWithError(err) } - respC <- resp + respC <- response{Response: resp} }() return &pushWriter{ @@ -280,12 +321,17 @@ func getManifestPath(object string, dgst digest.Digest) []string { return []string{"manifests", object} } +type response struct { + *http.Response + err error +} + type pushWriter struct { base *dockerBase ref string pipe *io.PipeWriter - responseC <-chan *http.Response + responseC <-chan response isManifest bool expected digest.Digest @@ -331,12 +377,10 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di if err := pw.pipe.Close(); err != nil { return err } - // TODO: Update status to determine committing - // TODO: timeout waiting for response resp := <-pw.responseC - if resp == nil { - return errors.New("no response") + if resp.err != nil { + return resp.err } // 201 is specified return status, some registries return @@ -344,7 +388,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di switch resp.StatusCode { case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: default: - return errors.Errorf("unexpected status: %s", resp.Status) + return remoteserrors.NewUnexpectedStatusErr(resp.Response) } status, err := pw.tracker.GetStatus(pw.ref) @@ -369,6 +413,10 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di return errors.Errorf("got digest %s, expected %s", actual, expected) } + status.Committed = true + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return nil } diff --git 
a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/registry.go
index 7c231d92..1e77d4c8 100644
--- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/registry.go
+++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/registry.go
@@ -17,7 +17,10 @@
 package docker
 
 import (
+ "net"
 "net/http"
+
+ "github.com/pkg/errors"
 )
 
 // HostCapabilities represent the capabilities of the registry
@@ -56,6 +59,7 @@ const (
 // Reserved for future capabilities (i.e. search, catalog, remove)
 )
 
+// Has checks whether the capabilities list has the provided capability
 func (c HostCapabilities) Has(t HostCapabilities) bool {
 return c&t == t
 }
@@ -201,12 +205,41 @@ func MatchAllHosts(string) (bool, error) {
 // MatchLocalhost is a host match function which returns true for
 // localhost.
+//
+// Note: this does not handle matching of ip addresses in octal,
+// decimal or hex form.
 func MatchLocalhost(host string) (bool, error) {
- for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} {
- if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') {
- return true, nil
+ switch {
+ case host == "::1":
+ return true, nil
+ case host == "[::1]":
+ return true, nil
+ }
+ h, p, err := net.SplitHostPort(host)
+
+ // addrError helps distinguish between errors of the form
+ // "no colon in address" and "too many colons in address".
+ // The former is fine as the host string need not have a
+ // port. The latter needs to be handled.
+ addrError := &net.AddrError{
+ Err: "missing port in address",
+ Addr: host,
+ }
+ if err != nil {
+ if err.Error() != addrError.Error() {
+ return false, err
 }
+ // host string without any port specified
+ h = host
+ } else if len(p) == 0 {
+ return false, errors.New("invalid host name format")
+ }
+
+ // use ipv4 dotted decimal for further checking
+ if h == "localhost" {
+ h = "127.0.0.1"
 }
- return host == "::1", nil
+ ip := net.ParseIP(h)
+ return ip.IsLoopback(), nil
 }
diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
index 53e42ecc..866379eb 100644
--- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
+++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
@@ -41,10 +41,6 @@ import (
 )
 
 var (
- // ErrNoToken is returned if a request is successful but the body does not
- // contain an authorization token.
- ErrNoToken = errors.New("authorization server did not include a token in the response")
-
 // ErrInvalidAuthorization is used when credentials are passed to a server but
 // those credentials are rejected.
ErrInvalidAuthorization = errors.New("authorization failed") @@ -223,20 +219,15 @@ func (r *countingReader) Read(p []byte) (int, error) { var _ remotes.Resolver = &dockerResolver{} func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { - refspec, err := reference.Parse(ref) + base, err := r.resolveDockerBase(ref) if err != nil { return "", ocispec.Descriptor{}, err } - + refspec := base.refspec if refspec.Object == "" { return "", ocispec.Descriptor{}, reference.ErrObjectRequired } - base, err := r.base(refspec) - if err != nil { - return "", ocispec.Descriptor{}, err - } - var ( lastErr error paths [][]string @@ -267,7 +258,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts") } - ctx, err = contextWithRepositoryScope(ctx, refspec, false) + ctx, err = ContextWithRepositoryScope(ctx, refspec, false) if err != nil { return "", ocispec.Descriptor{}, err } @@ -295,12 +286,14 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp if lastErr == nil { lastErr = err } + log.G(ctx).WithError(err).Info("trying next host") continue // try another host } resp.Body.Close() // don't care about body contents. if resp.StatusCode > 299 { if resp.StatusCode == http.StatusNotFound { + log.G(ctx).Info("trying next host - response was http.StatusNotFound") continue } return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) @@ -391,12 +384,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp } func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { - refspec, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - base, err := r.base(refspec) + base, err := r.resolveDockerBase(ref) if err != nil { return nil, err } @@ -407,23 +395,27 @@ func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetch } func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { - refspec, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - base, err := r.base(refspec) + base, err := r.resolveDockerBase(ref) if err != nil { return nil, err } return dockerPusher{ dockerBase: base, - object: refspec.Object, + object: base.refspec.Object, tracker: r.tracker, }, nil } +func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + return r.base(refspec) +} + type dockerBase struct { refspec reference.Spec repository string @@ -455,10 +447,11 @@ func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { } func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { - header := http.Header{} - for key, value := range r.header { - header[key] = append(header[key], value...) + header := r.header.Clone() + if header == nil { + header = http.Header{} } + for key, value := range host.Header { header[key] = append(header[key], value...) 
} @@ -525,7 +518,10 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { if err != nil { return nil, err } - req.Header = r.header + req.Header = http.Header{} // headers need to be copied to avoid concurrent map access + for k, v := range r.header { + req.Header[k] = v + } if r.body != nil { body, err := r.body() if err != nil { diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/scope.go index c8541c45..fe57f023 100644 --- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/scope.go +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -26,10 +26,10 @@ import ( "github.com/containerd/containerd/reference" ) -// repositoryScope returns a repository scope string such as "repository:foo/bar:pull" +// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull" // for "host/foo/bar:baz". // When push is true, both pull and push are added to the scope. -func repositoryScope(refspec reference.Spec, push bool) (string, error) { +func RepositoryScope(refspec reference.Spec, push bool) (string, error) { u, err := url.Parse("dummy://" + refspec.Locator) if err != nil { return "", err @@ -45,9 +45,9 @@ func repositoryScope(refspec reference.Spec, push bool) (string, error) { // value: []string (e.g. {"registry:foo/bar:pull"}) type tokenScopesKey struct{} -// contextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value. -func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) { - s, err := repositoryScope(refspec, push) +// ContextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value. +func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) { + s, err := RepositoryScope(refspec, push) if err != nil { return nil, err } @@ -66,9 +66,9 @@ func WithScope(ctx context.Context, scope string) context.Context { return context.WithValue(ctx, tokenScopesKey{}, scopes) } -// contextWithAppendPullRepositoryScope is used to append repository pull +// ContextWithAppendPullRepositoryScope is used to append repository pull // scope into existing scopes indexed by the tokenScopesKey{}. 
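+//
+// For example, the pusher in this patch widens its context this way before
+// probing a cross-repository mount source:
+//
+//	pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo)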
-func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { +func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) } diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/status.go b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/status.go index 8069d676..9751edac 100644 --- a/registry-library/vendor/github.com/containerd/containerd/remotes/docker/status.go +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/docker/status.go @@ -21,6 +21,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/moby/locker" "github.com/pkg/errors" ) @@ -28,6 +29,8 @@ import ( type Status struct { content.Status + Committed bool + // UploadUUID is used by the Docker registry to reference blob uploads UploadUUID string } @@ -38,15 +41,24 @@ type StatusTracker interface { SetStatus(string, Status) } +// StatusTrackLocker to track status of operations with lock +type StatusTrackLocker interface { + StatusTracker + Lock(string) + Unlock(string) +} + type memoryStatusTracker struct { statuses map[string]Status m sync.Mutex + locker *locker.Locker } // NewInMemoryTracker returns a StatusTracker that tracks content status in-memory -func NewInMemoryTracker() StatusTracker { +func NewInMemoryTracker() StatusTrackLocker { return &memoryStatusTracker{ statuses: map[string]Status{}, + locker: locker.New(), } } @@ -65,3 +77,11 @@ func (t *memoryStatusTracker) SetStatus(ref string, status Status) { t.statuses[ref] = status t.m.Unlock() } + +func (t *memoryStatusTracker) Lock(ref string) { + t.locker.Lock(ref) +} + +func (t *memoryStatusTracker) Unlock(ref string) { + t.locker.Unlock(ref) +} diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/registry-library/vendor/github.com/containerd/containerd/remotes/errors/errors.go new file mode 100644 index 00000000..519dbac1 --- /dev/null +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/errors/errors.go @@ -0,0 +1,56 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package errors + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" +) + +var _ error = ErrUnexpectedStatus{} + +// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status +type ErrUnexpectedStatus struct { + Status string + StatusCode int + Body []byte + RequestURL, RequestMethod string +} + +func (e ErrUnexpectedStatus) Error() string { + return fmt.Sprintf("unexpected status: %s", e.Status) +} + +// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response +func NewUnexpectedStatusErr(resp *http.Response) error { + var b []byte + if resp.Body != nil { + b, _ = ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB + } + err := ErrUnexpectedStatus{ + Body: b, + Status: resp.Status, + StatusCode: resp.StatusCode, + RequestMethod: resp.Request.Method, + } + if resp.Request.URL != nil { + err.RequestURL = resp.Request.URL.String() + } + return err +} diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/handlers.go b/registry-library/vendor/github.com/containerd/containerd/remotes/handlers.go index 671fea10..8f79c608 100644 --- a/registry-library/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -31,6 +31,7 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" ) type refKeyPrefix struct{} @@ -55,25 +56,32 @@ func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) conte // used to lookup ongoing processes related to the descriptor. This function // may look to the context to namespace the reference appropriately. func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { + key := desc.Digest.String() + if desc.Annotations != nil { + if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok { + key = fmt.Sprintf("%s@%s", name, desc.Digest.String()) + } + } + if v := ctx.Value(refKeyPrefix{}); v != nil { values := v.(map[string]string) if prefix := values[desc.MediaType]; prefix != "" { - return prefix + "-" + desc.Digest.String() + return prefix + "-" + key } } switch mt := desc.MediaType; { case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: - return "manifest-" + desc.Digest.String() + return "manifest-" + key case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: - return "index-" + desc.Digest.String() + return "index-" + key case images.IsLayerType(mt): - return "layer-" + desc.Digest.String() + return "layer-" + key case images.IsKnownConfig(mt): - return "config-" + desc.Digest.String() + return "config-" + key default: log.G(ctx).Warnf("reference for unknown type: %s", mt) - return "unknown-" + desc.Digest.String() + return "unknown-" + key } } @@ -115,6 +123,12 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc return err } + if desc.Size == 0 { + // most likely a poorly configured registry/web front end which responded with no + // Content-Length header; unable (not to mention useless) to commit a 0-length entry + // into the content store. 
Error out here otherwise the error sent back is confusing + return errors.Wrapf(errdefs.ErrInvalidArgument, "unable to fetch descriptor (%s) which reports content size of zero", desc.Digest) + } if ws.Offset == desc.Size { // If writer is already complete, commit and return err := cw.Commit(ctx, desc.Size, desc.Digest) @@ -151,7 +165,15 @@ func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error { log.G(ctx).Debug("push") - cw, err := pusher.Push(ctx, desc) + var ( + cw content.Writer + err error + ) + if cs, ok := pusher.(content.Ingester); ok { + cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + } else { + cw, err = pusher.Push(ctx, desc) + } if err != nil { if !errdefs.IsAlreadyExists(err) { return err @@ -175,7 +197,8 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc // // Base handlers can be provided which will be called before any push specific // handlers. -func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { +func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { + var m sync.Mutex manifestStack := []ocispec.Descriptor{} @@ -207,7 +230,7 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st handler = wrapper(handler) } - if err := images.Dispatch(ctx, handler, nil, desc); err != nil { + if err := images.Dispatch(ctx, handler, limiter, desc); err != nil { return err } diff --git a/registry-library/vendor/github.com/containerd/containerd/remotes/resolver.go b/registry-library/vendor/github.com/containerd/containerd/remotes/resolver.go index 914d351f..624b14f0 100644 --- a/registry-library/vendor/github.com/containerd/containerd/remotes/resolver.go +++ b/registry-library/vendor/github.com/containerd/containerd/remotes/resolver.go @@ -45,6 +45,8 @@ type Resolver interface { Fetcher(ctx context.Context, ref string) (Fetcher, error) // Pusher returns a new pusher for the provided reference + // The returned Pusher should satisfy content.Ingester and concurrent attempts + // to push the same blob using the Ingester API should result in ErrUnavailable. Pusher(ctx context.Context, ref string) (Pusher, error) } diff --git a/registry-library/vendor/github.com/containerd/containerd/sys/filesys.go b/registry-library/vendor/github.com/containerd/containerd/sys/filesys.go deleted file mode 100644 index 825d21d1..00000000 --- a/registry-library/vendor/github.com/containerd/containerd/sys/filesys.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sys - -import "os" - -// IsFifo checks if a file is a (named pipe) fifo -// if the file does not exist then it returns false -func IsFifo(path string) (bool, error) { - stat, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - if stat.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return true, nil - } - return false, nil -} diff --git a/registry-library/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/registry-library/vendor/github.com/containerd/containerd/sys/filesys_windows.go index 2eaee2ca..a9198ef3 100644 --- a/registry-library/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/registry-library/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -22,11 +22,14 @@ import ( "os" "path/filepath" "regexp" + "sort" + "strconv" "strings" "syscall" "unsafe" "github.com/Microsoft/hcsshim" + "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -257,12 +260,71 @@ func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, return h, e } -// ForceRemoveAll is the same as os.RemoveAll, but uses hcsshim.DestroyLayer in order -// to delete container layers. +// ForceRemoveAll is the same as os.RemoveAll, but is aware of io.containerd.snapshotter.v1.windows +// and uses hcsshim to unmount and delete container layers contained therein, in the correct order, +// when passed a containerd root data directory (i.e. the `--root` directory for containerd). func ForceRemoveAll(path string) error { + // snapshots/windows/windows.go init() + const snapshotPlugin = "io.containerd.snapshotter.v1" + "." + "windows" + // snapshots/windows/windows.go NewSnapshotter() + snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") + if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { + if err := cleanupWCOWLayers(snapshotDir); err != nil { + return errors.Wrapf(err, "failed to cleanup WCOW layers in %s", snapshotDir) + } + } + + return os.RemoveAll(path) +} + +func cleanupWCOWLayers(root string) error { + // See snapshots/windows/windows.go getSnapshotDir() + var layerNums []int + if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if path != root && info.IsDir() { + if layerNum, err := strconv.Atoi(filepath.Base(path)); err == nil { + layerNums = append(layerNums, layerNum) + } else { + return err + } + return filepath.SkipDir + } + + return nil + }); err != nil { + return err + } + + sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) + + for _, layerNum := range layerNums { + if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { + return err + } + } + + return nil +} + +func cleanupWCOWLayer(layerPath string) error { info := hcsshim.DriverInfo{ - HomeDir: filepath.Dir(path), + HomeDir: filepath.Dir(layerPath), } - return hcsshim.DestroyLayer(info, filepath.Base(path)) + // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared. 
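+ // The teardown below therefore tolerates that error from UnprepareLayer,
+ // then deactivates and finally destroys the layer, in that order.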
+ if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil {
+ if hcserror, ok := err.(*hcsshim.HcsError); !ok || hcserror.Err != windows.ERROR_DEV_NOT_EXIST {
+ return errors.Wrapf(err, "failed to unprepare %s", layerPath)
+ }
+ }
+
+ if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil {
+ return errors.Wrapf(err, "failed to deactivate %s", layerPath)
+ }
+
+ if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil {
+ return errors.Wrapf(err, "failed to destroy %s", layerPath)
+ }
+
+ return nil
 }
diff --git a/registry-library/vendor/github.com/containerd/containerd/sys/oom_linux.go b/registry-library/vendor/github.com/containerd/containerd/sys/oom_linux.go
new file mode 100644
index 00000000..82a347c6
--- /dev/null
+++ b/registry-library/vendor/github.com/containerd/containerd/sys/oom_linux.go
@@ -0,0 +1,83 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sys
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/containerd/containerd/pkg/userns"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // OOMScoreAdjMin is from OOM_SCORE_ADJ_MIN https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L9
+ OOMScoreAdjMin = -1000
+ // OOMScoreAdjMax is from OOM_SCORE_ADJ_MAX https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L10
+ OOMScoreAdjMax = 1000
+)
+
+// AdjustOOMScore sets the oom score for the provided pid. If the provided score
+// is out of range (-1000 - 1000), it is clipped to the min/max value.
+func AdjustOOMScore(pid, score int) error {
+ if score > OOMScoreAdjMax {
+ score = OOMScoreAdjMax
+ } else if score < OOMScoreAdjMin {
+ score = OOMScoreAdjMin
+ }
+ return SetOOMScore(pid, score)
+}
+
+// SetOOMScore sets the oom score for the provided pid
+func SetOOMScore(pid, score int) error {
+ if score > OOMScoreAdjMax || score < OOMScoreAdjMin {
+ return fmt.Errorf("value out of range (%d): OOM score must be between %d and %d", score, OOMScoreAdjMin, OOMScoreAdjMax)
+ }
+ path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
+ f, err := os.OpenFile(path, os.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if _, err = f.WriteString(strconv.Itoa(score)); err != nil {
+ if os.IsPermission(err) && (!runningPrivileged() || userns.RunningInUserNS()) {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// GetOOMScoreAdj gets the oom score for a process. It returns 0 (zero) if either
+// no oom score is set, or a score is set to 0.
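+//
+// For example, reading back a value previously applied with SetOOMScore:
+//
+//	score, err := sys.GetOOMScoreAdj(os.Getpid())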
+func GetOOMScoreAdj(pid int) (int, error) {
+ path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.Atoi(strings.TrimSpace(string(data)))
+}
+
+// runningPrivileged returns true if the effective user ID of the
+// calling process is 0
+func runningPrivileged() bool {
+ return unix.Geteuid() == 0
+}
diff --git a/registry-library/vendor/github.com/containerd/containerd/sys/oom_unix.go b/registry-library/vendor/github.com/containerd/containerd/sys/oom_unsupported.go
similarity index 50%
rename from registry-library/vendor/github.com/containerd/containerd/sys/oom_unix.go
rename to registry-library/vendor/github.com/containerd/containerd/sys/oom_unsupported.go
index d49d5bc8..f5d7e978 100644
--- a/registry-library/vendor/github.com/containerd/containerd/sys/oom_unix.go
+++ b/registry-library/vendor/github.com/containerd/containerd/sys/oom_unsupported.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !linux
 
 /*
 Copyright The containerd Authors.
@@ -18,40 +18,31 @@
 
 package sys
 
-import (
- "fmt"
- "io/ioutil"
- "os"
- "strconv"
- "strings"
+const (
+ // OOMScoreMaxKillable is not implemented on non-Linux platforms
+ OOMScoreMaxKillable = 0
+ // OOMScoreAdjMax is not implemented on non-Linux platforms
+ OOMScoreAdjMax = 0
 )
 
-// OOMScoreMaxKillable is the maximum score keeping the process killable by the oom killer
-const OOMScoreMaxKillable = -999
+// AdjustOOMScore sets the oom score for the provided pid. If the provided score
+// is out of range (-1000 - 1000), it is clipped to the min/max value.
+//
+// Not implemented on non-Linux platforms
+func AdjustOOMScore(pid, score int) error {
+ return nil
+}
 
-// SetOOMScore sets the oom score for the provided pid
+// SetOOMScore sets the oom score for the process
+//
+// Not implemented on non-Linux platforms
 func SetOOMScore(pid, score int) error {
- path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
- f, err := os.OpenFile(path, os.O_WRONLY, 0)
- if err != nil {
- return err
- }
- defer f.Close()
- if _, err = f.WriteString(strconv.Itoa(score)); err != nil {
- if os.IsPermission(err) && (RunningInUserNS() || RunningUnprivileged()) {
- return nil
- }
- return err
- }
 return nil
 }
 
 // GetOOMScoreAdj gets the oom score for a process
+//
+// Not implemented on non-Linux platforms
 func GetOOMScoreAdj(pid int) (int, error) {
- path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
- data, err := ioutil.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return strconv.Atoi(strings.TrimSpace(string(data)))
+ return 0, nil
 }
diff --git a/registry-library/vendor/github.com/containerd/containerd/sys/stat_bsd.go b/registry-library/vendor/github.com/containerd/containerd/sys/stat_bsd.go
index b9c95d90..4f03cd6c 100644
--- a/registry-library/vendor/github.com/containerd/containerd/sys/stat_bsd.go
+++ b/registry-library/vendor/github.com/containerd/containerd/sys/stat_bsd.go
@@ -1,4 +1,4 @@
-// +build darwin freebsd
+// +build darwin freebsd netbsd
 
 /*
 Copyright The containerd Authors.
diff --git a/registry-library/vendor/github.com/containerd/containerd/sys/stat_openbsd.go b/registry-library/vendor/github.com/containerd/containerd/sys/stat_openbsd.go
new file mode 100644
index 00000000..ec3b9df6
--- /dev/null
+++ b/registry-library/vendor/github.com/containerd/containerd/sys/stat_openbsd.go
@@ -0,0 +1,45 @@
+// +build openbsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import ( + "syscall" + "time" +) + +// StatAtime returns the Atim +func StatAtime(st *syscall.Stat_t) syscall.Timespec { + return st.Atim +} + +// StatCtime returns the Ctim +func StatCtime(st *syscall.Stat_t) syscall.Timespec { + return st.Ctim +} + +// StatMtime returns the Mtim +func StatMtime(st *syscall.Stat_t) syscall.Timespec { + return st.Mtim +} + +// StatATimeAsTime returns st.Atim as a time.Time +func StatATimeAsTime(st *syscall.Stat_t) time.Time { + // The int64 conversions ensure the line compiles for 32-bit systems as well. + return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert +} diff --git a/registry-library/vendor/github.com/containerd/containerd/sys/oom_windows.go b/registry-library/vendor/github.com/containerd/containerd/sys/userns_deprecated.go similarity index 68% rename from registry-library/vendor/github.com/containerd/containerd/sys/oom_windows.go rename to registry-library/vendor/github.com/containerd/containerd/sys/userns_deprecated.go index a917ba63..53acf554 100644 --- a/registry-library/vendor/github.com/containerd/containerd/sys/oom_windows.go +++ b/registry-library/vendor/github.com/containerd/containerd/sys/userns_deprecated.go @@ -16,16 +16,8 @@ package sys -// SetOOMScore sets the oom score for the process -// -// Not implemented on Windows -func SetOOMScore(pid, score int) error { - return nil -} +import "github.com/containerd/containerd/pkg/userns" -// GetOOMScoreAdj gets the oom score for a process -// -// Not implemented on Windows -func GetOOMScoreAdj(pid int) (int, error) { - return 0, nil -} +// RunningInUserNS detects whether we are currently running in a user namespace. +// Deprecated: use github.com/containerd/containerd/pkg/userns.RunningInUserNS instead. +var RunningInUserNS = userns.RunningInUserNS diff --git a/registry-library/vendor/github.com/containerd/containerd/version/version.go b/registry-library/vendor/github.com/containerd/containerd/version/version.go index 184ef00d..aca0c456 100644 --- a/registry-library/vendor/github.com/containerd/containerd/version/version.go +++ b/registry-library/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.4.3+unknown" + Version = "1.5.2+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/artifact/consts.go b/registry-library/vendor/github.com/deislabs/oras/pkg/artifact/consts.go deleted file mode 100644 index bc9e90ff..00000000 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/artifact/consts.go +++ /dev/null @@ -1,7 +0,0 @@ -package artifact - -const ( - // UnknownConfigMediaType is the default mediaType used when no - // config media type is specified. 
- UnknownConfigMediaType = "application/vnd.unknown.config.v1+json" -) diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/content/consts.go b/registry-library/vendor/github.com/deislabs/oras/pkg/content/consts.go deleted file mode 100644 index bda9e5f8..00000000 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/content/consts.go +++ /dev/null @@ -1,28 +0,0 @@ -package content - -import ocispec "github.com/opencontainers/image-spec/specs-go/v1" - -const ( - // DefaultBlobMediaType specifies the default blob media type - DefaultBlobMediaType = ocispec.MediaTypeImageLayer - // DefaultBlobDirMediaType specifies the default blob directory media type - DefaultBlobDirMediaType = ocispec.MediaTypeImageLayerGzip -) - -const ( - // TempFilePattern specifies the pattern to create temporary files - TempFilePattern = "oras" -) - -const ( - // AnnotationDigest is the annotation key for the digest of the uncompressed content - AnnotationDigest = "io.deis.oras.content.digest" - // AnnotationUnpack is the annotation key for indication of unpacking - AnnotationUnpack = "io.deis.oras.content.unpack" -) - -const ( - // OCIImageIndexFile is the file name of the index from the OCI Image Layout Specification - // Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md#indexjson-file - OCIImageIndexFile = "index.json" -) diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/content/errors.go b/registry-library/vendor/github.com/deislabs/oras/pkg/content/errors.go deleted file mode 100644 index e4a6cbf4..00000000 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/content/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package content - -import "errors" - -// Common errors -var ( - ErrNotFound = errors.New("not_found") - ErrNoName = errors.New("no_name") - ErrUnsupportedSize = errors.New("unsupported_size") - ErrUnsupportedVersion = errors.New("unsupported_version") -) - -// FileStore errors -var ( - ErrPathTraversalDisallowed = errors.New("path_traversal_disallowed") - ErrOverwriteDisallowed = errors.New("overwrite_disallowed") -) diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/content/interface.go b/registry-library/vendor/github.com/deislabs/oras/pkg/content/interface.go deleted file mode 100644 index 85caa3fd..00000000 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/content/interface.go +++ /dev/null @@ -1,9 +0,0 @@ -package content - -import "github.com/containerd/containerd/content" - -// ProvideIngester is the interface that groups the basic Read and Write methods. 
-type ProvideIngester interface { - content.Provider - content.Ingester -} diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/content/readerat.go b/registry-library/vendor/github.com/deislabs/oras/pkg/content/readerat.go deleted file mode 100644 index 6c1533af..00000000 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/content/readerat.go +++ /dev/null @@ -1,34 +0,0 @@ -package content - -import ( - "io" - - "github.com/containerd/containerd/content" -) - -// ensure interface -var ( - _ content.ReaderAt = sizeReaderAt{} -) - -type readAtCloser interface { - io.ReaderAt - io.Closer -} - -type sizeReaderAt struct { - readAtCloser - size int64 -} - -func (ra sizeReaderAt) Size() int64 { - return ra.size -} - -type nopCloser struct { - io.ReaderAt -} - -func (nopCloser) Close() error { - return nil -} diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/context/context.go b/registry-library/vendor/github.com/deislabs/oras/pkg/context/context.go deleted file mode 100644 index f070040e..00000000 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/context/context.go +++ /dev/null @@ -1,9 +0,0 @@ -package context - -import "context" - -// Background returns a default context with logger discarded. -func Background() context.Context { - ctx := context.Background() - return WithLoggerDiscarded(ctx) -} diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/store.go b/registry-library/vendor/github.com/deislabs/oras/pkg/oras/store.go deleted file mode 100644 index d26c53c7..00000000 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/store.go +++ /dev/null @@ -1,119 +0,0 @@ -package oras - -import ( - "context" - "errors" - - orascontent "github.com/deislabs/oras/pkg/content" - - "github.com/containerd/containerd/content" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ensure interface -var ( - _ content.Store = &hybridStore{} -) - -type hybridStore struct { - cache *orascontent.Memorystore - provider content.Provider - ingester content.Ingester -} - -func newHybridStoreFromProvider(provider content.Provider) *hybridStore { - return &hybridStore{ - cache: orascontent.NewMemoryStore(), - provider: provider, - } -} - -func newHybridStoreFromIngester(ingester content.Ingester) *hybridStore { - return &hybridStore{ - cache: orascontent.NewMemoryStore(), - ingester: ingester, - } -} - -func (s *hybridStore) Set(desc ocispec.Descriptor, content []byte) { - s.cache.Set(desc, content) -} - -// ReaderAt provides contents -func (s *hybridStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - readerAt, err := s.cache.ReaderAt(ctx, desc) - if err == nil { - return readerAt, nil - } - if s.provider != nil { - return s.provider.ReaderAt(ctx, desc) - } - return nil, err -} - -// Writer begins or resumes the active writer identified by desc -func (s *hybridStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - - if isAllowedMediaType(wOpts.Desc.MediaType, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex) || s.ingester == nil { - return s.cache.Writer(ctx, opts...) - } - return s.ingester.Writer(ctx, opts...) -} - -// TODO: implement (needed to create a content.Store) -// TODO: do not return empty content.Info -// Abort completely cancels the ingest operation targeted by ref. 
-func (s *hybridStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - return content.Info{}, nil -} - -// TODO: implement (needed to create a content.Store) -// Update updates mutable information related to content. -// If one or more fieldpaths are provided, only those -// fields will be updated. -// Mutable fields: -// labels.* -func (s *hybridStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - return content.Info{}, errors.New("not yet implemented: Update (content.Store interface)") -} - -// TODO: implement (needed to create a content.Store) -// Walk will call fn for each item in the content store which -// match the provided filters. If no filters are given all -// items will be walked. -func (s *hybridStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { - return errors.New("not yet implemented: Walk (content.Store interface)") -} - -// TODO: implement (needed to create a content.Store) -// Delete removes the content from the store. -func (s *hybridStore) Delete(ctx context.Context, dgst digest.Digest) error { - return errors.New("not yet implemented: Delete (content.Store interface)") -} - -// TODO: implement (needed to create a content.Store) -func (s *hybridStore) Status(ctx context.Context, ref string) (content.Status, error) { - // Status returns the status of the provided ref. - return content.Status{}, errors.New("not yet implemented: Status (content.Store interface)") -} - -// TODO: implement (needed to create a content.Store) -// ListStatuses returns the status of any active ingestions whose ref match the -// provided regular expression. If empty, all active ingestions will be -// returned. -func (s *hybridStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { - return []content.Status{}, errors.New("not yet implemented: ListStatuses (content.Store interface)") -} - -// TODO: implement (needed to create a content.Store) -// Abort completely cancels the ingest operation targeted by ref. -func (s *hybridStore) Abort(ctx context.Context, ref string) error { - return errors.New("not yet implemented: Abort (content.Store interface)") -} diff --git a/registry-library/vendor/github.com/klauspost/compress/LICENSE b/registry-library/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000..1eb75ef6 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/registry-library/vendor/github.com/klauspost/compress/fse/README.md b/registry-library/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 00000000..ea7324da
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error | Description |
+|---------------------|-----------------------------------------------------------------------------|
+| `nil` | Everything ok, output is returned |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
+| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
+| `(error)` | An internal error occurred. |
+
+As can be seen above there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using this
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
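+
+A minimal round-trip sketch, assuming only the `Compress`/`Decompress`
+signatures and the `Scratch` behaviour documented above (a real caller also
+needs to handle `ErrIncompressible` and `ErrUseRLE` from the table):
+
+```go
+var s fse.Scratch
+
+compressed, err := fse.Compress(input, &s)
+if err != nil {
+	// e.g. fall back to storing the block uncompressed
+}
+
+// The same Out buffer backs both directions, so detach it if the
+// compressed bytes must remain valid across the next call.
+s.Out = nil
+
+decompressed, err := fse.Decompress(compressed, &s)
+if err != nil {
+	// the input was likely corrupted
+}
+```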
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+A lot of factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions are currently only running on the calling goroutine so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
\ No newline at end of file
diff --git a/registry-library/vendor/github.com/klauspost/compress/fse/bitreader.go b/registry-library/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 00000000..f65eb390
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,122 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.bitsRead += 8 - uint8(highBits(uint32(v)))
+ return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+ if n == 0 || b.bitsRead >= 64 {
+ return 0
+ }
+ return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/registry-library/vendor/github.com/klauspost/compress/fse/bitwriter.go b/registry-library/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000..43e46361 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. 
+// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/registry-library/vendor/github.com/klauspost/compress/fse/bytereader.go b/registry-library/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000..abade2d6 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. 
+ +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/registry-library/vendor/github.com/klauspost/compress/fse/compress.go b/registry-library/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000..0d31f5eb --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,684 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. 
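+// Since no previous state exists yet, the first state is derived directly
+// from the symbol's transform: the bit cost is rounded out of deltaNbBits,
+// and deltaFindState then selects the starting entry in the state table.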
+func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. 
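+// The header starts with 4 bits holding (tableLog - minTablelog); each
+// normalized count then follows in a variable-width bit stream, with runs
+// of zero counts run-length encoded.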
+// This is read back by readNCount. +func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
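+// It works in three passes over the normalized counts: computing cumulative
+// symbol start positions, spreading the symbols across the table, and deriving
+// the per-symbol transform (deltaNbBits/deltaFindState) used by encode.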
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
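+// For example, with 1000 bytes remaining and 20 symbols in use,
+// minBitsSrc = highBits(999)+1 = 10 and minBitsSymbols = highBits(19)+2 = 6,
+// so 6 is returned.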
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
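+// The primary method hands over when the leftover rounding error is so large
+// that absorbing it would cut the most probable symbol's count by half or more.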
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
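+// The absolute values of the normalized counts must sum to exactly
+// 1 << actualTableLog; on mismatch the table is printed for debugging.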
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
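+// The compression table's tableSymbol and stateTable buffers double as
+// scratch space here, so a single Scratch serves both directions.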
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/registry-library/vendor/github.com/klauspost/compress/fse/fse.go b/registry-library/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000..535cbadf --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
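+// For a table of 256 entries the step is 128 + 32 + 3 = 163, which is odd and
+// therefore shares no factor with the power-of-two table size, so repeated
+// stepping modulo the size visits every slot exactly once.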
+func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/huff0/.gitignore b/registry-library/vendor/github.com/klauspost/compress/huff0/.gitignore new file mode 100644 index 00000000..b3d26295 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/huff0/.gitignore @@ -0,0 +1 @@ +/huff0-fuzz.zip diff --git a/registry-library/vendor/github.com/klauspost/compress/huff0/README.md b/registry-library/vendor/github.com/klauspost/compress/huff0/README.md new file mode 100644 index 00000000..8b6e5c66 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/huff0/README.md @@ -0,0 +1,89 @@ +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. + +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) + +## News + +This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. + +This ensures that most functionality is well tested. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and +[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | +| `(error)` | An internal error occurred. | + + +As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. 
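+
+As a sketch of a complete round trip (the sample input and the `main` wrapper
+are illustrative), compressing with `Compress1X` and decoding via `ReadTable`:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("entropy coding "), 200) // compressible input
+	var c huff0.Scratch
+	comp, _, err := huff0.Compress1X(in, &c)
+	if err != nil {
+		panic(err) // handle ErrIncompressible/ErrUseRLE in real code
+	}
+	comp = append([]byte(nil), comp...) // copy: comp aliases c.Out
+
+	// ReadTable consumes the table from the block and returns the data part.
+	d := &huff0.Scratch{MaxDecodedSize: len(in)}
+	d, data, err := huff0.ReadTable(comp, d)
+	if err != nil {
+		panic(err)
+	}
+	dec, err := d.Decompress1X(data)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(len(comp), "bytes, match:", bytes.Equal(dec, in))
+}
+```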
+ +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. + +## Tables and re-use + +Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. + +The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) +that controls this behaviour. See the documentation for details. This can be altered between each block. + +Do however note that this information is *not* stored in the output block and it is up to the users of the package to +record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, +based on the boolean reported back from the CompressXX call. + +If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the +[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. + +## Decompressing + +The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). +This will initialize the decoding tables. +You can supply the complete block to `ReadTable` and it will return the data part of the block +which can be given to the decompressor. + +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/registry-library/vendor/github.com/klauspost/compress/huff0/bitreader.go b/registry-library/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 00000000..a4979e88 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,329 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
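+// The encoder finishes with a single set sentinel bit, so init locates the
+// highest set bit of the final byte and starts reading just below it.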
+func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBit32(uint32(v))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) peekBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +func (b *bitReader) advance(n uint8) { + b.bitsRead += n +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
+func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. 
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderShifted) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/registry-library/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/registry-library/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000..6bce4e87 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,210 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
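+// Compared to two encSymbol calls, both code words are combined first and the
+// bit container is updated once, saving a shift/or pair per pair of symbols.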
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + return + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + b.bitContainer >>= 1 << 3 + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + b.bitContainer >>= 2 << 3 + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + b.bitContainer >>= 3 << 3 + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + b.bitContainer >>= 4 << 3 + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + b.bitContainer >>= 5 << 3 + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + b.bitContainer >>= 6 << 3 + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + b.bitContainer >>= 7 << 3 + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + b.bitContainer = 0 + b.nBits = 0 + return + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. 
+ b.flushAlign()
+ return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+ b.bitContainer = 0
+ b.nBits = 0
+ b.out = out
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/huff0/bytereader.go b/registry-library/vendor/github.com/klauspost/compress/huff0/bytereader.go
new file mode 100644
index 00000000..50bcdf6e
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+ b []byte
+ off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+ b.b = in
+ b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+ b.off += int(n)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+ v3 := int32(b.b[b.off+3])
+ v2 := int32(b.b[b.off+2])
+ v1 := int32(b.b[b.off+1])
+ v0 := int32(b.b[b.off])
+ return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+ v3 := uint32(b.b[b.off+3])
+ v2 := uint32(b.b[b.off+2])
+ v1 := uint32(b.b[b.off+1])
+ v0 := uint32(b.b[b.off])
+ return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+ return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+ return len(b.b) - b.off
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/huff0/compress.go b/registry-library/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 00000000..dea115b3
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,657 @@
+package huff0
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+ s, err = s.prepare(in)
+ if err != nil {
+ return nil, false, err
+ }
+ return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similarly to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+ s, err = s.prepare(in)
+ if err != nil {
+ return nil, false, err
+ }
+ if false {
+ // TODO: compress4Xp only slightly faster.
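+ // (Disabled branch kept for reference: the parallel path would only be
+ // used for inputs above parallelThreshold and with GOMAXPROCS > 1.)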
+ const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
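+ // (prevTable's old backing array is recycled, truncated to length 0,
+ // as the next cTable to avoid reallocating it on the next block.)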
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
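+// The reuse result reports whether every symbol present in the input also
+// has a code in s.prevTable, i.e. whether the previous table could be reused.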
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
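+ // (huffNode0[i+1] aliases huffNode[i]; index 0 holds the sentinel written below.)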
+ huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+ for huffNode[nonNullRank].count == 0 {
+ nonNullRank--
+ }
+
+ lowS := int16(nonNullRank)
+ nodeRoot := nodeNb + lowS - 1
+ lowN := nodeNb
+ huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
+ huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+ nodeNb++
+ lowS -= 2
+ for n := nodeNb; n <= nodeRoot; n++ {
+ huffNode[n].count = 1 << 30
+ }
+ // fake entry, strong barrier
+ huffNode0[0].count = 1 << 31
+
+ // create parents
+ for nodeNb <= nodeRoot {
+ var n1, n2 int16
+ if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+ n1 = lowS
+ lowS--
+ } else {
+ n1 = lowN
+ lowN++
+ }
+ if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+ n2 = lowS
+ lowS--
+ } else {
+ n2 = lowN
+ lowN++
+ }
+
+ huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
+ huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+ nodeNb++
+ }
+
+ // distribute weights (unlimited tree height)
+ huffNode[nodeRoot].nbBits = 0
+ for n := nodeRoot - 1; n >= startNode; n-- {
+ huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+ }
+ for n := uint16(0); n <= nonNullRank; n++ {
+ huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+ }
+ s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+ maxNbBits := s.actualTableLog
+
+ // fill result into tree (val, nbBits)
+ if maxNbBits > tableLogMax {
+ return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+ }
+ var nbPerRank [tableLogMax + 1]uint16
+ var valPerRank [16]uint16
+ for _, v := range huffNode[:nonNullRank+1] {
+ nbPerRank[v.nbBits]++
+ }
+ // determine starting value per rank
+ {
+ min := uint16(0)
+ for n := maxNbBits; n > 0; n-- {
+ // get starting value within each rank
+ valPerRank[n] = min
+ min += nbPerRank[n]
+ min >>= 1
+ }
+ }
+
+ // push nbBits per symbol, symbol order
+ for _, v := range huffNode[:nonNullRank+1] {
+ s.cTable[v.symbol].nBits = v.nbBits
+ }
+
+ // assign value within rank, symbol order
+ t := s.cTable[:s.symbolLen]
+ for n, val := range t {
+ nbits := val.nBits & 15
+ v := valPerRank[nbits]
+ t[n].val = v
+ valPerRank[nbits] = v + 1
+ }
+
+ return nil
+}
+
+// huffSort will sort symbols in decreasing order.
+func (s *Scratch) huffSort() {
+ type rankPos struct {
+ base uint32
+ current uint32
+ }
+
+ // Clear nodes
+ nodes := s.nodes[:huffNodesLen+1]
+ s.nodes = nodes
+ nodes = nodes[1 : huffNodesLen+1]
+
+ // Sort into buckets based on length of symbol count.
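+ // Symbols are bucketed by the magnitude (high bit) of count+1, and an
+ // insertion pass below keeps each bucket sorted by count.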
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } + return +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/registry-library/vendor/github.com/klauspost/compress/huff0/decompress.go b/registry-library/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000..41703bba --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1164 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle + double []dEntryDouble +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// double-symbols decoding +type dEntryDouble struct { + seq uint16 + nBits uint8 + len uint8 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
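+// The first header byte selects the encoding: values >= 128 mean the weights
+// follow uncompressed as 4-bit pairs ((iSize-127) weights), while lower
+// values give the byte length of FSE-compressed weights.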
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(in) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + 
return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + shift := (8 - d.actualTableLog) & 7 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. 
+ var buf [256]byte + var off uint8 + + const shift = 0 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
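+ // The four interleaved streams each own a 64-byte quarter of buf and are
+ // flushed to their quarter of the output every bufoff rounds.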
+ const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := br.off*8 + uint(64-br.bitsRead) + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (8 - d.actualTableLog) & 7 + + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = 
single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := int(br.off*8) + int(64-br.bitsRead) + for bitsLeft > 0 { + if br.finished() { + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()>>shift].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= int(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 0 + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + 
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+ off = 0
+ out = out[bufoff:]
+ decoded += 256
+ // There must at least be 3 buffers left.
+ if len(out) < dstEvery*3 {
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[:off])
+ copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ for i := range br {
+ offset := dstEvery * i
+ br := &br[i]
+ bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ for bitsLeft > 0 {
+ if br.finished() {
+ return nil, io.ErrUnexpectedEOF
+ }
+ if br.bitsRead >= 56 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value |= uint64(low) << (br.bitsRead - 32)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ // end inline...
+ if offset >= len(out) {
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ v := single[br.peekByteFast()>>shift].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= int(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+ if s == nil || len(s.dt.single) == 0 {
+ return
+ }
+ dt := s.dt.single[:1<<s.actualTableLog]
+ tablelog := s.actualTableLog
+ ok := 0
+ broken := 0
+ for sym, enc := range ct {
+ errs := 0
+ broken++
+ if enc.nBits == 0 {
+ for _, dec := range dt {
+ if uint8(dec.entry>>8) == byte(sym) {
+ fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+ errs++
+ break
+ }
+ }
+ if errs == 0 {
+ broken--
+ }
+ continue
+ }
+ // Unused bits in input
+ ub := tablelog - enc.nBits
+ top := enc.val << ub
+ // decoder looks at top bits.
+ dec := dt[top]
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+ errs++
+ }
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+ errs++
+ }
+ if errs > 0 {
+ fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+ continue
+ }
+ // Ensure that all combinations are covered.
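+ // Every setting of the ub unused low bits must decode to the same
+ // symbol and bit length as the base entry checked above.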
+ for i := uint16(0); i < (1 << ub); i++ {
+ vval := top | i
+ dec := dt[vval]
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+ errs++
+ }
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+ errs++
+ }
+ if errs > 20 {
+ fmt.Fprintf(w, "%d errors, stopping\n", errs)
+ break
+ }
+ }
+ if errs == 0 {
+ ok++
+ broken--
+ }
+ }
+ if broken > 0 {
+ fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+ }
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/huff0/huff0.go b/registry-library/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 00000000..7ec2022b
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,273 @@
+// Package huff0 provides fast Huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+
+ "github.com/klauspost/compress/fse"
+)
+
+const (
+ maxSymbolValue = 255
+
+ // zstandard limits tablelog to 11, see:
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+ tableLogMax = 11
+ tableLogDefault = 11
+ minTablelog = 5
+ huffNodesLen = 512
+
+ // BlockSizeMax is maximum input size for a single block uncompressed.
+ BlockSizeMax = 1<<18 - 1
+)
+
+var (
+ // ErrIncompressible is returned when input is judged to be too hard to compress.
+ ErrIncompressible = errors.New("input is not compressible")
+
+ // ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+ ErrUseRLE = errors.New("input is single value repeated")
+
+ // ErrTooBig is returned if input is too large for a single block.
+ ErrTooBig = errors.New("input too big")
+
+ // ErrMaxDecodedSizeExceeded is returned if the uncompressed output exceeds MaxDecodedSize.
+ ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+type ReusePolicy uint8
+
+const (
+ // ReusePolicyAllow will allow reuse if it produces smaller output.
+ ReusePolicyAllow ReusePolicy = iota
+
+ // ReusePolicyPrefer will re-use aggressively if possible.
+ // This will not check if a new table will produce smaller output,
+ // except if the current table is impossible to use or
+ // compressed output is bigger than input.
+ ReusePolicyPrefer
+
+ // ReusePolicyNone will disable re-use of tables.
+ // This is slightly faster than ReusePolicyAllow but may produce larger output.
+ ReusePolicyNone
+
+ // ReusePolicyMust requires reuse of the previous table.
+ // If reuse is not possible, or does not produce smaller output,
+ // ErrIncompressible is returned.
+ ReusePolicyMust
+)
+
+type Scratch struct {
+ count [maxSymbolValue + 1]uint32
+
+ // Per block parameters.
+ // These can be used to override compression parameters of the block.
+ // Do not touch, unless you know what you are doing.
+
+ // Out is the output buffer.
+ // If the scratch is re-used before the caller is done processing the output,
+ // set this field to nil.
+ // Otherwise the output buffer will be re-used for the next compression/decompression step
+ // and allocation will be avoided.
+ Out []byte
+
+ // OutTable will contain the table data only, if a new table has been generated.
+ // Slice of the returned data.
+ OutTable []byte
+
+ // OutData will contain the compressed data.
+ // Slice of the returned data.
+ OutData []byte
+
+ // MaxDecodedSize will set the maximum allowed output size.
+ // This value will automatically be set to BlockSizeMax if not set.
+ // Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+ MaxDecodedSize int
+
+ br byteReader
+
+ // MaxSymbolValue will override the maximum symbol value of the next block.
+ MaxSymbolValue uint8
+
+ // TableLog will attempt to override the tablelog for the next block.
+ // Must be <= 11 and >= 5.
+ TableLog uint8
+
+ // Reuse will specify the reuse policy
+ Reuse ReusePolicy
+
+ // WantLogLess allows specifying a log 2 reduction that should at least be achieved,
+ // otherwise the block will be returned as incompressible.
+ // The reduction should then at least be (input size >> WantLogLess)
+ // If WantLogLess == 0 any improvement will do.
+ WantLogLess uint8
+
+ symbolLen uint16 // Length of active part of the symbol table.
+ maxCount int // count of the most probable symbol
+ clearCount bool // clear count
+ actualTableLog uint8 // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table
+ prevTable cTable // Table used for previous compression.
+ cTable cTable // compression table
+ dt dTable // decompression table
+ nodes []nodeElt
+ tmpOut [4][]byte
+ fse *fse.Scratch
+ huffWeight [maxSymbolValue + 1]byte
+}
+
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+ if cap(s.prevTable) < len(src.prevTable) {
+ s.prevTable = make(cTable, 0, maxSymbolValue+1)
+ }
+ s.prevTable = s.prevTable[:len(src.prevTable)]
+ copy(s.prevTable, src.prevTable)
+ s.prevTableLog = src.prevTableLog
+}
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+ if len(in) > BlockSizeMax {
+ return nil, ErrTooBig
+ }
+ if s == nil {
+ s = &Scratch{}
+ }
+ if s.MaxSymbolValue == 0 {
+ s.MaxSymbolValue = maxSymbolValue
+ }
+ if s.TableLog == 0 {
+ s.TableLog = tableLogDefault
+ }
+ if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+ return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+ }
+ if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+ s.MaxDecodedSize = BlockSizeMax
+ }
+ if s.clearCount && s.maxCount == 0 {
+ for i := range s.count {
+ s.count[i] = 0
+ }
+ s.clearCount = false
+ }
+ if cap(s.Out) == 0 {
+ s.Out = make([]byte, 0, len(in))
+ }
+ s.Out = s.Out[:0]
+
+ s.OutTable = nil
+ s.OutData = nil
+ if cap(s.nodes) < huffNodesLen+1 {
+ s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+ }
+ s.nodes = s.nodes[:0]
+ if s.fse == nil {
+ s.fse = &fse.Scratch{}
+ }
+ s.br.init(in)
+
+ return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+ var (
+ // precomputed conversion table
+ bitsToWeight [tableLogMax + 1]byte
+ huffLog = s.actualTableLog
+ // last weight is not saved.
+ maxSymbolValue = uint8(s.symbolLen - 1)
+ huffWeight = s.huffWeight[:256]
+ )
+ const (
+ maxFSETableLog = 6
+ )
+ // convert to weight
+ bitsToWeight[0] = 0
+ for n := uint8(1); n < huffLog+1; n++ {
+ bitsToWeight[n] = huffLog + 1 - n
+ }
+
+ // Acquire histogram for FSE.
+ hist := s.fse.Histogram()
+ hist = hist[:256]
+ for i := range hist[:16] {
+ hist[i] = 0
+ }
+ for n := uint8(0); n < maxSymbolValue; n++ {
+ v := bitsToWeight[c[n].nBits] & 15
+ huffWeight[n] = v
+ hist[v]++
+ }
+
+ // FSE compress if feasible.
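+ // FSE-compressed weights win only when they undercut the raw 4-bit
+ // encoding, which costs roughly symbolLen/2 bytes.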
+ if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/.gitignore b/registry-library/vendor/github.com/klauspost/compress/snappy/.gitignore new file mode 100644 index 00000000..042091d9 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/AUTHORS b/registry-library/vendor/github.com/klauspost/compress/snappy/AUTHORS new file mode 100644 index 00000000..bcfa1952 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. 
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/registry-library/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
new file mode 100644
index 00000000..931ae316
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/LICENSE b/registry-library/vendor/github.com/klauspost/compress/snappy/LICENSE
new file mode 100644
index 00000000..6050c10f
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/README b/registry-library/vendor/github.com/klauspost/compress/snappy/README new file mode 100644 index 00000000..cea12879 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/decode.go b/registry-library/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 
00000000..72efb035 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. 
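+// Any error encountered is sticky: once Read fails, subsequent calls
+// return the same error.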
+func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/registry-library/vendor/github.com/klauspost/compress/snappy/decode_amd64.go new file mode 100644 index 00000000..fcd192b8 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. 
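+// It is implemented in assembly in decode_amd64.s.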
+// +//go:noescape +func decode(dst, src []byte) int diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/registry-library/vendor/github.com/klauspost/compress/snappy/decode_amd64.s new file mode 100644 index 00000000..1c66e372 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -0,0 +1,482 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. 
+ // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + CMPQ SI, R13 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ CMPQ SI, R13
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------]           d-offset
+ //   [------]         d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------]           d-offset
+ //     [------]       d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/decode_other.go b/registry-library/vendor/github.com/klauspost/compress/snappy/decode_other.go new file mode 100644 index 00000000..94a96c5d --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/encode.go b/registry-library/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 00000000..8d393e90 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
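+//
+// For example, Encode(nil, src) lets Encode allocate a destination of
+// MaxEncodedLen(len(src)) bytes.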
+func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
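+ // That is a worst-case expansion of 7/6, which is where the n/6 term
+ // below comes from.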
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
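+ // Any error from write is recorded in w.err and checked after the loop.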
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/registry-library/vendor/github.com/klauspost/compress/snappy/encode_amd64.go new file mode 100644 index 00000000..150d91bc --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. 
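+// It is implemented in assembly in encode_amd64.s.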
+// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/registry-library/vendor/github.com/klauspost/compress/snappy/encode_amd64.s new file mode 100644 index 00000000..adfd979f --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
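+ // The tag byte 0xfe is 63<<2 | tagCopy2, i.e. a tagCopy2 op whose
+ // length field holds 63 and therefore encodes a copy length of 64.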
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
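+ // Only DI needs to survive the call; emitLiteral's return value is read
+ // back from 48(SP) below.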
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET
diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/encode_other.go b/registry-library/vendor/github.com/klauspost/compress/snappy/encode_other.go
new file mode 100644
index 00000000..dbcae905
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/registry-library/vendor/github.com/klauspost/compress/snappy/runbench.cmd new file mode 100644 index 00000000..d24eb4b4 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/runbench.cmd @@ -0,0 +1,2 @@ +del old.txt +go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/registry-library/vendor/github.com/klauspost/compress/snappy/snappy.go b/registry-library/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 00000000..ea58ced8 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. 
+// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/README.md b/registry-library/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000..7680bfe1 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,417 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
+It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder.
+It implements a high-performance compression algorithm that is, for now, focused on speed.
+
+This package provides [compression](#Compressor) and [decompression](#Decompressor) of Zstandard content.
+
+This package is pure Go and without use of "unsafe".
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
+
+
+## Compressor
+
+### Status:
+
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
+
+For now, four pre-defined compression levels have been implemented:
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+At a compression ratio around stdlib level 3, it is usually about 3x as fast.
+
+
+### Usage
+
+An Encoder can be used for either compressing a stream via the
+`io.WriteCloser` interface supported by the Encoder or as multiple independent
+tasks via the `EncodeAll` function.
+Smaller encodes are encouraged to use the EncodeAll function.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, it can be done like this:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
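+
+As an illustration, here is a minimal sketch of selecting one of the pre-defined levels. It mirrors
+the `Compress` example above (same imports assumed); the function name is only for illustration:
+
+```Go
+// CompressBetter compresses input to output at the "Better" level,
+// which is roughly equivalent to zstd level 7.
+func CompressBetter(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```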
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73101992 643 313.87 +silesia.tar zskp 2 211947520 67504318 969 208.38 +silesia.tar zskp 3 211947520 65177448 1899 106.44 +silesia.tar zskp 4 211947520 61381950 8115 24.91 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1654 122.21 +silesia.tar gzkp 1 211947520 80369488 1168 173.06 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 235022249 3088 590.30 +gob-stream zskp 2 1911399616 205669791 3786 481.34 +gob-stream zskp 3 1911399616 185792019 9324 195.48 +gob-stream zskp 4 1911399616 171537212 32113 56.76 +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 +gob-stream gzstd 1 1911399616 357382641 10251 177.82 +gob-stream gzkp 1 1911399616 362156523 5695 320.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343848582 3609 264.18 +enwik9 zskp 2 1000000000 317276632 5746 165.97 +enwik9 zskp 3 1000000000 294540704 11725 81.34 +enwik9 zskp 4 1000000000 276609671 44029 21.66 +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 +enwik9 gzstd 1 1000000000 382578136 9604 99.30 +enwik9 gzkp 1 1000000000 383825945 6544 145.73 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file out level insize outsize millis mb/s
+github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
+github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
+github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54
+github-june-2days-2019.json zskp 4 6273951764 512796117 97791 61.18
+github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
+github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
+github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
+github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
+github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
+github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file out level insize outsize millis mb/s
+rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
+rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
+rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75
+rawstudio-mint14.tar zskp 4 8558382592 3027332295 486243 16.79
+rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
+rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
+rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
+rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
+rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
+rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file out level insize outsize millis mb/s
+nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
+nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
+nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53
+nyc-taxi-data-10M.csv zskp 4 3325605752 495986829 89368 35.49
+nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
+nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
+nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
+nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
+nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
+Both are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+	_, err = io.Copy(out, d)
+	return err
+}
+```
+
+When you no longer need the Reader, it is important to call the "Close" function to stop its running goroutines.
+See "Allocation-less operation" below.
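+
+As a variation on the example above, one stream decoder can be reused across several inputs with
+`Reset`. This is only a minimal sketch; the paired `inputs`/`outputs` slices are assumed for
+illustration:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// DecompressAll decompresses each input stream into the matching output.
+func DecompressAll(inputs []io.Reader, outputs []io.Writer) error {
+	d, err := zstd.NewReader(nil)
+	if err != nil {
+		return err
+	}
+	// Close stops the decoder goroutines once all streams are done.
+	defer d.Close()
+	for i, in := range inputs {
+		// Reset switches the decoder to the next stream, reusing its resources.
+		if err := d.Reset(in); err != nil {
+			return err
+		}
+		if _, err := io.Copy(outputs[i], d); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```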
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil)
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+A dictionary will be used automatically for data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time, but be sure to test performance when adopting them.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* call this when you will no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made, as sketched below.
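+
+A minimal sketch of such an allocation-less buffer decode, assuming the caller can estimate the
+decompressed size (`expectedSize` is an assumption for illustration):
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+var decoder, _ = zstd.NewReader(nil)
+
+// DecompressInto decodes src, appending into a pre-sized destination, so
+// DecodeAll should not need to allocate when expectedSize is large enough.
+func DecompressInto(src []byte, expectedSize int) ([]byte, error) {
+	dst := make([]byte, 0, expectedSize)
+	return decoder.DecodeAll(src, dst)
+}
+```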
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder operates as follows:
+
+* One goroutine reads input and splits the input to several block decoders.
+* A number of decoders will decode blocks.
+* A goroutine coordinates these blocks and sends history from one to the next.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
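+
+As an example of the buffer-decoder concurrency mentioned above, here is a minimal sketch that
+decodes several buffers in parallel with one shared decoder (error handling is elided for brevity):
+
+```Go
+import (
+	"sync"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// One shared decoder; WithDecoderConcurrency caps concurrent DecodeAll calls.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(4))
+
+// DecompressMany decodes each buffer on its own goroutine.
+func DecompressMany(buffers [][]byte) [][]byte {
+	out := make([][]byte, len(buffers))
+	var wg sync.WaitGroup
+	for i, b := range buffers {
+		wg.Add(1)
+		go func(i int, b []byte) {
+			defer wg.Done()
+			// Each slot is written by exactly one goroutine.
+			out[i], _ = decoder.DecodeAll(b, nil)
+		}(i, b)
+	}
+	wg.Wait()
+	return out
+}
+```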
+
+### Benchmarks
+
+These are some examples of performance compared to the [Datadog cgo library](https://github.com/DataDog/zstd).
+
+The first two are streaming decodes and the last are smaller inputs.
+
+```
+BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
+BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
+
+BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
+BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
+
+Concurrent performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
+
+BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
+```
+
+This reflects the performance around May 2020, but this may be out of date.
+
+## Contributions
+
+Contributions are always welcome.
+For new features/fixes, remember to add tests, and for performance enhancements include benchmarks.
+
+For sending files to reproduce errors, use a service like [goobox](https://goobox.io/#/upload) or similar to share your files.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package, Copyright (c) 2016 Caleb Spare.
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/bitreader.go b/registry-library/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 00000000..85445853
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,136 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+	"math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint   // next byte to read is at in[off - 1]
+	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+	if n == 0 /*|| b.bitsRead >= 64 */ {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) int {
+	const regMask = 64 - 1
+	v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return int(v)
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/registry-library/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000..303ae90f --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,169 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 32 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. 
+// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/blockdec.go b/registry-library/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000..b51d922b --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,739 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/huff0"
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type blockType uint8
+
+//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex
+
+const (
+	blockTypeRaw blockType = iota
+	blockTypeRLE
+	blockTypeCompressed
+	blockTypeReserved
+)
+
+type literalsBlockType uint8
+
+const (
+	literalsBlockRaw literalsBlockType = iota
+	literalsBlockRLE
+	literalsBlockCompressed
+	literalsBlockTreeless
+)
+
+const (
+	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
+	maxCompressedBlockSize = 128 << 10
+
+	// Maximum possible block size (all Raw+Uncompressed).
+	maxBlockSize = (1 << 21) - 1
+
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
+	maxCompressedLiteralSize = 1 << 18
+	maxRLELiteralSize        = 1 << 20
+	maxMatchLen              = 131074
+	maxSequences             = 0x7f00 + 0xffff
+
+	// We support slightly less than the reference decoder to be able to
+	// use ints on 32 bit archs.
+	maxOffsetBits = 30
+)
+
+var (
+	huffDecoderPool = sync.Pool{New: func() interface{} {
+		return &huff0.Scratch{}
+	}}
+
+	fseDecoderPool = sync.Pool{New: func() interface{} {
+		return &fseDecoder{}
+	}}
+)
+
+type blockDec struct {
+	// Raw source data of the block.
+	data        []byte
+	dataStorage []byte
+
+	// Destination of the decoded data.
+	dst []byte
+
+	// Buffer for literals data.
+	literalBuf []byte
+
+	// Window size of the block.
+	WindowSize uint64
+
+	history     chan *history
+	input       chan struct{}
+	result      chan decodeOutput
+	sequenceBuf []seq
+	err         error
+	decWG       sync.WaitGroup
+
+	// Frame to use for singlethreaded decoding.
+	// Should not be used by the decoder itself since parent may be another frame.
+	localFrame *frameDec
+
+	// Block is RLE, this is the size.
+	RLESize uint32
+	tmp     [4]byte
+
+	Type blockType
+
+	// Is this the last block of a frame?
+	Last bool
+
+	// Use less memory
+	lowMem bool
+}
+
+func (b *blockDec) String() string {
+	if b == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+	b := blockDec{
+		lowMem:  lowMem,
+		result:  make(chan decodeOutput, 1),
+		input:   make(chan struct{}, 1),
+		history: make(chan *history, 1),
+	}
+	b.decWG.Add(1)
+	go b.startDecoder()
+	return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+	b.WindowSize = windowSize
+	tmp := br.readSmall(3)
+	if tmp == nil {
+		if debug {
+			println("Reading block header:", io.ErrUnexpectedEOF)
+		}
+		return io.ErrUnexpectedEOF
+	}
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	b.Last = bh&1 != 0
+	b.Type = blockType((bh >> 1) & 3)
+	// find size.
+	cSize := int(bh >> 3)
+	maxSize := maxBlockSize
+	switch b.Type {
+	case blockTypeReserved:
+		return ErrReservedBlockType
+	case blockTypeRLE:
+		b.RLESize = uint32(cSize)
+		if b.lowMem {
+			maxSize = cSize
+		}
+		cSize = 1
+	case blockTypeCompressed:
+		if debug {
+			println("Data size on stream:", cSize)
+		}
+		b.RLESize = 0
+		maxSize = maxCompressedBlockSize
+		if windowSize < maxCompressedBlockSize && b.lowMem {
+			maxSize = int(windowSize)
+		}
+		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+			if debug {
+				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrCompressedSizeTooBig
+		}
+	case blockTypeRaw:
+		b.RLESize = 0
+		// We do not need a destination for raw blocks.
+		maxSize = -1
+	default:
+		panic("Invalid block type")
+	}
+
+	// Read block data.
+	if cap(b.dataStorage) < cSize {
+		if b.lowMem {
+			b.dataStorage = make([]byte, 0, cSize)
+		} else {
+			b.dataStorage = make([]byte, 0, maxBlockSize)
+		}
+	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
+	var err error
+	b.data, err = br.readBig(cSize, b.dataStorage)
+	if err != nil {
+		if debug {
+			println("Reading block:", err, "(", cSize, ")", len(b.data))
+			printf("%T", br)
+		}
+		return err
+	}
+	return nil
+}
+
+// sendErr will make the decoder report this error on the current frame.
+func (b *blockDec) sendErr(err error) {
+	b.Last = true
+	b.Type = blockTypeReserved
+	b.err = err
+	b.input <- struct{}{}
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+	close(b.input)
+	close(b.history)
+	close(b.result)
+	b.decWG.Wait()
+}
+
+// startDecoder will prepare decoding the block when it receives input.
+// This will separate output and history.
+func (b *blockDec) startDecoder() {
+	defer b.decWG.Done()
+	for range b.input {
+		//println("blockDec: Got block input")
+		switch b.Type {
+		case blockTypeRLE:
+			if cap(b.dst) < int(b.RLESize) {
+				if b.lowMem {
+					b.dst = make([]byte, b.RLESize)
+				} else {
+					b.dst = make([]byte, maxBlockSize)
+				}
+			}
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst[:b.RLESize],
+				err: nil,
+			}
+			v := b.data[0]
+			for i := range o.b {
+				o.b[i] = v
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeRaw:
+			o := decodeOutput{
+				d:   b,
+				b:   b.data,
+				err: nil,
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeCompressed:
+			b.dst = b.dst[:0]
+			err := b.decodeCompressed(nil)
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst,
+				err: err,
+			}
+			if debug {
+				println("Decompressed to", len(b.dst), "bytes, error:", err)
+			}
+			b.result <- o
+		case blockTypeReserved:
+			// Used for returning errors.
+			<-b.history
+			b.result <- decodeOutput{
+				d:   b,
+				b:   nil,
+				err: b.err,
+			}
+		default:
+			panic("Invalid block type")
+		}
+		if debug {
+			println("blockDec: Finished block")
+		}
+	}
+}
+
+// decodeBuf will decode the block using the supplied history,
+// without fetching it from the history channel.
+func (b *blockDec) decodeBuf(hist *history) error {
+	switch b.Type {
+	case blockTypeRLE:
+		if cap(b.dst) < int(b.RLESize) {
+			if b.lowMem {
+				b.dst = make([]byte, b.RLESize)
+			} else {
+				b.dst = make([]byte, maxBlockSize)
+			}
+		}
+		b.dst = b.dst[:b.RLESize]
+		v := b.data[0]
+		for i := range b.dst {
+			b.dst[i] = v
+		}
+		hist.appendKeep(b.dst)
+		return nil
+	case blockTypeRaw:
+		hist.appendKeep(b.data)
+		return nil
+	case blockTypeCompressed:
+		saved := b.dst
+		b.dst = hist.b
+		hist.b = nil
+		err := b.decodeCompressed(hist)
+		if debug {
+			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
+		}
+		hist.b = b.dst
+		b.dst = saved
+		return err
+	case blockTypeReserved:
+		// Used for returning errors.
+		return b.err
+	default:
+		panic("Invalid block type")
+	}
+}
+
+// decodeCompressed will start decompressing a block.
+// If no history is supplied, the decoder will decode as much as possible
+// before fetching history from blockDec.history.
+func (b *blockDec) decodeCompressed(hist *history) error {
+	in := b.data
+	delayedHistory := hist == nil
+
+	if delayedHistory {
+		// We must always grab history.
+		defer func() {
+			if hist == nil {
+				<-b.history
+			}
+		}()
+	}
+	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
+	if len(in) < 2 {
+		return ErrBlockTooSmall
+	}
+	litType := literalsBlockType(in[0] & 3)
+	var litRegenSize int
+	var litCompSize int
+	sizeFormat := (in[0] >> 2) & 3
+	var fourStreams bool
+	switch litType {
+	case literalsBlockRaw, literalsBlockRLE:
+		switch sizeFormat {
+		case 0, 2:
+			// Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
+			litRegenSize = int(in[0] >> 3)
+			in = in[1:]
+		case 1:
+			// Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
+			in = in[2:]
+		case 3:
+			// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
+			if len(in) < 3 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
+			in = in[3:]
+		}
+	case literalsBlockCompressed, literalsBlockTreeless:
+		switch sizeFormat {
+		case 0, 1:
+			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debug { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + var literals []byte + var huff *huff0.Scratch + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize) + } else { + if litRegenSize > maxCompressedLiteralSize { + // Exceptional + b.literalBuf = make([]byte, litRegenSize) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) + + } + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debug { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debug { + printf("Found %d compressed literals\n", litCompSize) + } + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + huff = huffDecoderPool.Get().(*huff0.Scratch) + var err error + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + if huff == nil { + huff = &huff0.Scratch{} + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return err + } + // Use our out buffer. 
+ if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + if debug { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + seqHeader := in[0] + nSeqs := 0 + switch { + case seqHeader == 0: + in = in[1:] + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + // Allocate sequences + if cap(b.sequenceBuf) < nSeqs { + if b.lowMem { + b.sequenceBuf = make([]seq, nSeqs) + } else { + // Allocate max + b.sequenceBuf = make([]seq, nSeqs, maxSequences) + } + } else { + // Reuse buffer + b.sequenceBuf = b.sequenceBuf[:nSeqs] + } + var seqs = &sequenceDecs{} + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debug { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debug { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + dec := fseDecoderPool.Get().(*fseDecoder) + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + dec.setRLE(symb) + seq.fse = dec + if debug { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + dec := fseDecoderPool.Get().(*fseDecoder) + err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + seq.fse = dec + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + + // Wait for history. + // All time spent after this is critical since it is strictly sequential. + if hist == nil { + hist = <-b.history + if hist.error { + return ErrDecoderClosed + } + } + + // Decode treeless literal block. + if litType == literalsBlockTreeless { + // TODO: We could send the history early WITHOUT the stream history. + // This would allow decoding treeless literals before the byte history is available. 
+		//   Silesia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
+		//   So not much obvious gain here.
+
+		if hist.huffTree == nil {
+			return errors.New("literal block was treeless, but no history was defined")
+		}
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+			}
+		}
+		var err error
+		// Use our out buffer.
+		huff = hist.huffTree
+		if fourStreams {
+			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+		} else {
+			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+		}
+		// Make sure we don't leak our literals buffer
+		if err != nil {
+			println("decompressing literals:", err)
+			return err
+		}
+		if len(literals) != litRegenSize {
+			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+	} else {
+		if hist.huffTree != nil && huff != nil {
+			if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
+				huffDecoderPool.Put(hist.huffTree)
+			}
+			hist.huffTree = nil
+		}
+	}
+	if huff != nil {
+		hist.huffTree = huff
+	}
+	if debug {
+		println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+	}
+
+	if nSeqs == 0 {
+		// Decompressed content is defined entirely as Literals Section content.
+		b.dst = append(b.dst, literals...)
+		if delayedHistory {
+			hist.append(literals)
+		}
+		return nil
+	}
+
+	seqs, err := seqs.mergeHistory(&hist.decoders)
+	if err != nil {
+		return err
+	}
+	if debug {
+		println("History merged ok")
+	}
+	br := &bitReader{}
+	if err := br.init(in); err != nil {
+		return err
+	}
+
+	// TODO: Investigate if sending history without decoders is faster.
+	//   This would allow the sequences to be decoded async and only have to construct stream history.
+	//   If only recent offsets were not transferred, this would be an obvious win.
+	//   Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+
+	hbytes := hist.b
+	if len(hbytes) > hist.windowSize {
+		hbytes = hbytes[len(hbytes)-hist.windowSize:]
+		// We do not need history any more.
+		if hist.dict != nil {
+			hist.dict.content = nil
+		}
+	}
+
+	if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
+		println("initializing sequences:", err)
+		return err
+	}
+
+	err = seqs.decode(nSeqs, br, hbytes)
+	if err != nil {
+		return err
+	}
+	if !br.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
+	}
+
+	err = br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	if len(b.data) > maxCompressedBlockSize {
+		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
+	}
+	// Set output and release references.
+	b.dst = seqs.out
+	seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+
+	if !delayedHistory {
+		// If we don't have delayed history, no need to update.
+		hist.recentOffsets = seqs.prevOffset
+		return nil
+	}
+	if b.Last {
+		// If this is the last block, we don't care about history.
+ println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } + hist.append(b.dst) + hist.recentOffsets = seqs.prevOffset + if debug { + println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") + } + + return nil +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/blockenc.go b/registry-library/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000..9647c64e --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,871 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 200 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. 
+func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debug { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debug { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. 
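// For reference, the blockHeader setters above pack the 3-byte zstd block
// header as a little-endian bit field: bit 0 = Last_Block, bits 1-2 =
// Block_Type, bits 3-23 = Block_Size. A hedged round-trip sketch (names
// illustrative, not part of the vendored file):
//
//	func packBlockHeader(last bool, typ, size uint32) uint32 {
//		h := size<<3 | (typ&3)<<1
//		if last {
//			h |= 1
//		}
//		return h
//	}
//
//	func unpackBlockHeader(h uint32) (last bool, typ, size uint32) {
//		return h&1 == 1, (h >> 1) & 3, h >> 3
//	}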
+func (h literalsHeader) appendTo(b []byte) []byte {
+	size := uint8(h >> 60)
+	switch size {
+	case 1:
+		b = append(b, uint8(h))
+	case 2:
+		b = append(b, uint8(h), uint8(h>>8))
+	case 3:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+	case 4:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
+	case 5:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
+	default:
+		panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
+	}
+	return b
+}
+
+// size returns the output size with currently set values.
+func (h literalsHeader) size() int {
+	return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+	b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+	b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+	// Check if offset is one of the recent offsets.
+	// Adjusts the output offset accordingly.
+	// Gives a tiny bit of compression, typically around 1%.
+	if true {
+		if lits > 0 {
+			switch offset {
+			case b.recentOffsets[0]:
+				offset = 1
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		} else {
+			switch offset {
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 1
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[0] - 1:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		}
+	} else {
+		offset += 3
+	}
+	return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(a)))
+	bh.setType(blockTypeRaw)
+	b.output = bh.appendTo(b.output[:0])
+	b.output = append(b.output, a...)
+	if debug {
+		println("Adding RAW block, length", len(a), "last:", b.last)
+	}
+}
+
+// encodeRawTo will write a raw block header and the supplied bytes to dst and return it.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debug {
+		println("Adding RAW block, length", len(src), "last:", b.last)
+	}
+	return dst
+}
+
+// encodeLits can be used if the block is only litLen.
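// For reference, matchOffset above implements zstd repeat offsets
// ("repcodes"): the three most recently used offsets can be encoded as the
// small values 1-3, and everything else as distance+3. A hedged sketch of
// the lits > 0 case only (helper name illustrative, not part of the
// vendored file):
//
//	func toRepCode(offset uint32, recent *[3]uint32) uint32 {
//		switch offset {
//		case recent[0]:
//			return 1
//		case recent[1]:
//			recent[0], recent[1] = offset, recent[0]
//			return 2
//		case recent[2]:
//			recent[0], recent[1], recent[2] = offset, recent[0], recent[1]
//			return 3
//		default:
//			recent[0], recent[1], recent[2] = offset, recent[0], recent[1]
//			return offset + 3
//		}
//	}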
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debug { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debug { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debug { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + default: + return err + case nil: + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debug { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram()[:256] + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. 
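// For reference, encodeLits above selects the literal encoding purely by
// size: >= 1024 bytes compresses with four huff0 streams, > 32 bytes with
// one stream, and anything smaller is stored raw; huff0's
// ErrIncompressible/ErrUseRLE results demote the block to raw or RLE.
// A hedged sketch of just that decision (illustrative, not vendored code):
//
//	func literalsStrategy(n int) string {
//		switch {
//		case n >= 1024:
//			return "compressed, 4 huff0 streams"
//		case n > 32:
//			return "compressed, 1 huff0 stream"
//		default:
//			return "stored raw (too small to compress)"
//		}
//	}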
+ saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debug { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debug { + println("Adding literals RLE") + } + default: + if debug { + println("Adding literals ERROR:", err) + } + return err + case nil: + // Compressed litLen... + if reUsed { + if debug { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debug { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debug { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debug { + println("Adding literals compressed") + } + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debug { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
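// For reference, the switch above writes the Number_of_Sequences header, a
// 1-3 byte variable-length count. A hedged sketch of the matching decode
// (helper name illustrative, not part of the vendored file):
//
//	func decodeNumSequences(in []byte) (nSeqs, hdrLen int, ok bool) {
//		if len(in) == 0 {
//			return 0, 0, false
//		}
//		switch b0 := in[0]; {
//		case b0 < 128: // includes 0, meaning no sequences
//			return int(b0), 1, true
//		case b0 < 255:
//			if len(in) < 2 {
//				return 0, 0, false
//			}
//			return int(b0-128)<<8 | int(in[1]), 2, true
//		default: // b0 == 255
//			if len(in) < 3 {
//				return 0, 0, false
//			}
//			return 0x7f00 + int(in[1]) + int(in[2])<<8, 3, true
//		}
//	}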
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debug { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debug { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debug { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debug { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debug { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debug { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debug { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. 
+	wr.addBits32NC(s.litLen, llB.outBits)
+	wr.addBits32NC(s.matchLen, mlB.outBits)
+	wr.flush32()
+	wr.addBits32NC(s.offset, ofB.outBits)
+	if debugSequences {
+		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
+	}
+	seq--
+	if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
+		// No need to flush (common)
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is 8 for all.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// We checked that all can stay within 32 bits
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	} else {
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is below 8 for each.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// ml+ll = max 32 bits total
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.flush32()
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	}
+	ml.flush(mlEnc.actualTableLog)
+	of.flush(ofEnc.actualTableLog)
+	ll.flush(llEnc.actualTableLog)
+	err = wr.close()
+	if err != nil {
+		return err
+	}
+	b.output = wr.out
+
+	if len(b.output)-3-bhOffset >= b.size {
+		// Maybe even add a bigger margin.
+		b.litEnc.Reuse = huff0.ReusePolicyNone
+		return errIncompressible
+	}
+
+	// Size is output minus block header.
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+	if debug {
+		println("Rewriting block header", bh)
+	}
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
+	b.coders.setPrev(llEnc, mlEnc, ofEnc)
+	return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+	if len(b.sequences) == 0 {
+		// nothing to do
+		return
+	}
+
+	if len(b.sequences) > math.MaxUint16 {
+		panic("can only encode up to 64K sequences")
+	}
+	// No bounds checks after here:
+	llH := b.coders.llEnc.Histogram()[:256]
+	ofH := b.coders.ofEnc.Histogram()[:256]
+	mlH := b.coders.mlEnc.Histogram()[:256]
+	for i := range llH {
+		llH[i] = 0
+	}
+	for i := range ofH {
+		ofH[i] = 0
+	}
+	for i := range mlH {
+		mlH[i] = 0
+	}
+
+	var llMax, ofMax, mlMax uint8
+	for i, seq := range b.sequences {
+		v := llCode(seq.litLen)
+		seq.llCode = v
+		llH[v]++
+		if v > llMax {
+			llMax = v
+		}
+
+		v = ofCode(seq.offset)
+		seq.ofCode = v
+		ofH[v]++
+		if v > ofMax {
+			ofMax = v
+		}
+
+		v = mlCode(seq.matchLen)
+		seq.mlCode = v
+		mlH[v]++
+		if v > mlMax {
+			mlMax = v
+			if debugAsserts && mlMax > maxMatchLengthSymbol {
+				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+			}
+		}
+		b.sequences[i] = seq
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	if debugAsserts && mlMax > maxMatchLengthSymbol {
+		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+	}
+	if debugAsserts && ofMax > maxOffsetBits {
+		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+	}
+	if debugAsserts && llMax > maxLiteralLengthSymbol {
+		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+	}
+
+	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+	b.coders.ofEnc.HistogramFinished(ofMax,
maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/registry-library/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 00000000..01a01e48 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
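// For reference, the generated String methods in this file all use the
// standard stringer layout: one concatenated name string plus an index
// array marking substring boundaries. A hedged minimal reproduction of the
// pattern (names illustrative, not part of the vendored file; uses strconv):
//
//	const demoName = "alphabetagamma"
//
//	var demoIndex = [...]uint8{0, 5, 9, 14}
//
//	func demoString(i int) string {
//		if i < 0 || i >= len(demoIndex)-1 {
//			return "demo(" + strconv.Itoa(i) + ")"
//		}
//		return demoName[demoIndex[i]:demoIndex[i+1]]
//	}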
+ var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/registry-library/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000..658ef783 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,127 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" + "io/ioutil" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns nil if no more input is available. + readSmall(n int) []byte + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil + } + r := bb[:n] + *b = bb[n:] + return r +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) remain() []byte { + return *b +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int) error { + bb := *b + if len(bb) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. 
+ if n2 != n { + if debug { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil + } + return r.tmp[:n] +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int) error { + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/bytereader.go b/registry-library/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000..2c4fca17 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,88 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. 
+func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/registry-library/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 00000000..87896c5e --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,202 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "bytes" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // Window Size the window of data to keep while decoding. + // Will only be set if HasFCS is false. + WindowSize uint64 + + // Frame content size. + // Expected size of the entire frame. + FrameContentSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // Skippable will be true if the frame is meant to be skipped. + // No other information will be populated. + Skippable bool + + // If set there is a checksum present for the block content. + HasCheckSum bool + + // If this is true FrameContentSize will have a valid value + HasFCS bool + + SingleSegment bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
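// A hedged usage sketch for Header.Decode as documented above: read up to
// HeaderMaxSize bytes and inspect the result (probeFrame is illustrative,
// not part of the package; requires the io package):
//
//	func probeFrame(r io.Reader) (zstd.Header, error) {
//		buf := make([]byte, zstd.HeaderMaxSize)
//		n, err := io.ReadFull(r, buf)
//		if err != nil && err != io.ErrUnexpectedEOF {
//			return zstd.Header{}, err
//		}
//		var h zstd.Header
//		err = h.Decode(buf[:n])
//		return h, err
//	}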
+func (h *Header) Decode(in []byte) error { + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + b, in := in[:4], in[4:] + if !bytes.Equal(b, frameMagic) { + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + *h = Header{Skippable: true} + return nil + } + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + + // Clear output + *h = Header{} + fhd, in := in[0], in[1:] + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + if b == nil { + return io.ErrUnexpectedEOF + } + switch size { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + if b == nil { + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp, in := in[:3], in[3:] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
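// For reference, the Window_Descriptor expansion above turns one byte into
// a window size: the top 5 bits select a power of two (starting at 2^10)
// and the low 3 bits add eighths of it. A hedged standalone sketch (helper
// name illustrative, not part of the vendored file):
//
//	func windowSizeFromDescriptor(wd byte) uint64 {
//		windowBase := uint64(1) << (10 + wd>>3)
//		return windowBase + (windowBase/8)*uint64(wd&7)
//	}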
+	cSize := int(bh >> 3)
+	switch blockType {
+	case blockTypeReserved:
+		return nil
+	case blockTypeRLE:
+		h.FirstBlock.Compressed = true
+		h.FirstBlock.DecompressedSize = cSize
+		h.FirstBlock.CompressedSize = 1
+	case blockTypeCompressed:
+		h.FirstBlock.Compressed = true
+		h.FirstBlock.CompressedSize = cSize
+	case blockTypeRaw:
+		h.FirstBlock.DecompressedSize = cSize
+		h.FirstBlock.CompressedSize = cSize
+	default:
+		panic("Invalid block type")
+	}
+
+	h.FirstBlock.OK = true
+	return nil
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/decoder.go b/registry-library/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 00000000..1d41c25d
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,561 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use Reset(r io.Reader) to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+	o decoderOptions
+
+	// Unreferenced decoders, ready for use.
+	decoders chan *blockDec
+
+	// Streams ready to be decoded.
+	stream chan decodeStream
+
+	// Current read position used for Reader functionality.
+	current decoderState
+
+	// Custom dictionaries.
+	// Always uses copies.
+	dicts map[uint32]dict
+
+	// streamWg is the waitgroup for all streams
+	streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+	// current block being written to stream.
+	decodeOutput
+
+	// output in order to be written to stream.
+	output chan decodeOutput
+
+	// cancel remaining output.
+	cancel chan struct{}
+
+	flushed bool
+}
+
+var (
+	// Check the interfaces we want to support.
+	_ = io.WriterTo(&Decoder{})
+	_ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
+	var d Decoder
+	d.o.setDefault()
+	for _, o := range opts {
+		err := o(&d.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	d.current.flushed = true
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+	}
+
+	// Transfer option dicts.
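// A hedged usage sketch for the streaming mode documented above (error
// handling abbreviated; decompress is illustrative, not part of the
// package; requires the io package):
//
//	func decompress(dst io.Writer, src io.Reader) error {
//		dec, err := zstd.NewReader(src)
//		if err != nil {
//			return err
//		}
//		defer dec.Close()
//		_, err = io.Copy(dst, dec)
//		return err
//	}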
+	d.dicts = make(map[uint32]dict, len(d.o.dicts))
+	for _, dc := range d.o.dicts {
+		d.dicts[dc.id] = dc
+	}
+	d.o.dicts = nil
+
+	// Create decoders
+	d.decoders = make(chan *blockDec, d.o.concurrent)
+	for i := 0; i < d.o.concurrent; i++ {
+		dec := newBlockDec(d.o.lowMem)
+		dec.localFrame = newFrameDec(d.o)
+		d.decoders <- dec
+	}
+
+	if r == nil {
+		return &d, nil
+	}
+	return &d, d.Reset(r)
+}
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes read and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+	if d.stream == nil {
+		return 0, ErrDecoderNilInput
+	}
+	var n int
+	for {
+		if len(d.current.b) > 0 {
+			filled := copy(p, d.current.b)
+			p = p[filled:]
+			d.current.b = d.current.b[filled:]
+			n += filled
+		}
+		if len(p) == 0 {
+			break
+		}
+		if len(d.current.b) == 0 {
+			// We have an error and no more data
+			if d.current.err != nil {
+				break
+			}
+			if !d.nextBlock(n == 0) {
+				return n, nil
+			}
+		}
+	}
+	if len(d.current.b) > 0 {
+		if debug {
+			println("returning", n, "still bytes left:", len(d.current.b))
+		}
+		// Only return error at end of block
+		return n, nil
+	}
+	if d.current.err != nil {
+		d.drainOutput()
+	}
+	if debug {
+		println("returning", n, d.current.err, len(d.decoders))
+	}
+	return n, d.current.err
+}
+
+// Reset will reset the decoder with the supplied stream after the current has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
+// should be used.
+func (d *Decoder) Reset(r io.Reader) error {
+	if d.current.err == ErrDecoderClosed {
+		return d.current.err
+	}
+
+	d.drainOutput()
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+		d.current.flushed = true
+		return nil
+	}
+
+	if d.stream == nil {
+		d.stream = make(chan decodeStream, 1)
+		d.streamWg.Add(1)
+		go d.startStreamDecoder(d.stream)
+	}
+
+	// If bytes buffer and < 1MB, do sync decoding anyway.
+	if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
+		var bb2 byter
+		bb2 = bb
+		if debug {
+			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+		}
+		b := bb2.Bytes()
+		var dst []byte
+		if cap(d.current.b) > 0 {
+			dst = d.current.b
+		}
+
+		dst, err := d.DecodeAll(b, dst[:0])
+		if err == nil {
+			err = io.EOF
+		}
+		d.current.b = dst
+		d.current.err = err
+		d.current.flushed = true
+		if debug {
+			println("sync decode to", len(dst), "bytes, err:", err)
+		}
+		return nil
+	}
+
+	// Remove current block.
+	d.current.decodeOutput = decodeOutput{}
+	d.current.err = nil
+	d.current.cancel = make(chan struct{})
+	d.current.flushed = false
+	d.current.d = nil
+
+	d.stream <- decodeStream{
+		r:      r,
+		output: d.current.output,
+		cancel: d.current.cancel,
+	}
+	return nil
+}
+
+// drainOutput will drain the output until errEndOfStream is sent.
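// The sync path in Reset above uses DecodeAll directly. A hedged usage
// sketch for that stateless mode: create one decoder with a nil reader,
// keep it, and share it between goroutines (decodeBlob is illustrative,
// not part of the package):
//
//	var dec, _ = zstd.NewReader(nil) // shared, stateless use only
//
//	func decodeBlob(compressed []byte) ([]byte, error) {
//		// Preallocating dst avoids growth when the output size is roughly known.
//		return dec.DecodeAll(compressed, make([]byte, 0, 1<<20))
//	}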
+func (d *Decoder) drainOutput() {
+	if d.current.cancel != nil {
+		println("cancelling current")
+		close(d.current.cancel)
+		d.current.cancel = nil
+	}
+	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+		d.current.b = nil
+	}
+	if d.current.output == nil || d.current.flushed {
+		println("current already flushed")
+		return
+	}
+	for {
+		select {
+		case v := <-d.current.output:
+			if v.d != nil {
+				if debug {
+					printf("re-adding decoder %p", v.d)
+				}
+				d.decoders <- v.d
+			}
+			if v.err == errEndOfStream {
+				println("current flushed")
+				d.current.flushed = true
+				return
+			}
+		}
+	}
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+	if d.stream == nil {
+		return 0, ErrDecoderNilInput
+	}
+	var n int64
+	for {
+		if len(d.current.b) > 0 {
+			n2, err2 := w.Write(d.current.b)
+			n += int64(n2)
+			if err2 != nil && d.current.err == nil {
+				d.current.err = err2
+				break
+			}
+		}
+		if d.current.err != nil {
+			break
+		}
+		d.nextBlock(true)
+	}
+	err := d.current.err
+	if err != nil {
+		d.drainOutput()
+	}
+	if err == io.EOF {
+		err = nil
+	}
+	return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	if d.current.err == ErrDecoderClosed {
+		return dst, ErrDecoderClosed
+	}
+
+	// Grab a block decoder and frame decoder.
+	block := <-d.decoders
+	frame := block.localFrame
+	defer func() {
+		if debug {
+			printf("re-adding decoder: %p", block)
+		}
+		frame.rawInput = nil
+		frame.bBuf = nil
+		d.decoders <- block
+	}()
+	frame.bBuf = input
+
+	for {
+		frame.history.reset()
+		err := frame.reset(&frame.bBuf)
+		if err == io.EOF {
+			if debug {
+				println("frame reset return EOF")
+			}
+			return dst, nil
+		}
+		if frame.DictionaryID != nil {
+			dict, ok := d.dicts[*frame.DictionaryID]
+			if !ok {
+				return nil, ErrUnknownDictionary
+			}
+			frame.history.setDict(&dict)
+		}
+		if err != nil {
+			return dst, err
+		}
+		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			return dst, ErrDecoderSizeExceeded
+		}
+		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+			// Never preallocate more than 1 GB up front.
+			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
+				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+				copy(dst2, dst)
+				dst = dst2
+			}
+		}
+		if cap(dst) == 0 {
+			// Allocate len(input) * 2 by default if nothing is provided
+			// and we didn't get frame content size.
+			size := len(input) * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			if uint64(size) > d.o.maxDecodedSize {
+				size = int(d.o.maxDecodedSize)
+			}
+			dst = make([]byte, 0, size)
+		}
+
+		dst, err = frame.runDecoder(dst, block)
+		if err != nil {
+			return dst, err
+		}
+		if len(frame.bBuf) == 0 {
+			if debug {
+				println("frame dbuf empty")
+			}
+			break
+		}
+	}
+	return dst, nil
+}
+
+// nextBlock returns the next block.
+// If an error occurs d.err will be set.
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.d != nil { + if debug { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } + if d.current.err != nil { + // Keep error state. + return blocking + } + + if blocking { + d.current.decodeOutput = <-d.current.output + } else { + select { + case d.current.decodeOutput = <-d.current.output: + default: + return false + } + } + if debug { + println("got", len(d.current.b), "bytes, error:", d.current.err) + } + return true +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. +func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.stream != nil { + close(d.stream) + d.streamWg.Wait() + d.stream = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +type decodeStream struct { + r io.Reader + + // Blocks ready to be written to output. + output chan decodeOutput + + // cancel reading from the input + cancel chan struct{} +} + +// errEndOfStream indicates that everything from the stream was read. +var errEndOfStream = errors.New("end-of-stream") + +// Create Decoder: +// Spawn n block decoders. These accept tasks to decode a block. +// Create goroutine that handles stream processing, this will send history to decoders as they are available. +// Decoders update the history as they decode. +// When a block is returned: +// a) history is sent to the next decoder, +// b) content written to CRC. +// c) return data to WRITER. +// d) wait for next block to return data. +// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. 
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { + defer d.streamWg.Done() + frame := newFrameDec(d.o) + for stream := range inStream { + if debug { + println("got new stream") + } + br := readerWrapper{r: stream.r} + decodeStream: + for { + frame.history.reset() + err := frame.reset(&br) + if debug && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err != nil { + stream.output <- decodeOutput{ + err: err, + } + break + } + if debug { + println("starting frame decoder") + } + + // This goroutine will forward history between frames. + frame.frameDone.Add(1) + frame.initAsync() + + go frame.startDecoder(stream.output) + decodeFrame: + // Go through all blocks of the frame. + for { + dec := <-d.decoders + select { + case <-stream.cancel: + if !frame.sendErr(dec, io.EOF) { + // To not let the decoder dangle, send it back. + stream.output <- decodeOutput{d: dec} + } + break decodeStream + default: + } + err := frame.next(dec) + switch err { + case io.EOF: + // End of current frame, no error + println("EOF on next block") + break decodeFrame + case nil: + continue + default: + println("block decoder returned", err) + break decodeStream + } + } + // All blocks have started decoding, check if there are more frames. + println("waiting for done") + frame.frameDone.Wait() + println("done waiting...") + } + frame.frameDone.Wait() + println("Sending EOS") + stream.output <- decodeOutput{err: errEndOfStream} + } +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/registry-library/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000..284d3844 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,84 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + dicts []dict +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + } + o.maxDecodedSize = 1 << 63 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n <= 0 { + return fmt.Errorf("Concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. 
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return fmt.Errorf("WithDecoderMaxMemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+// If several dictionaries with the same ID are provided, the last one will be used.
+func WithDecoderDicts(dicts ...[]byte) DOption {
+	return func(o *decoderOptions) error {
+		for _, b := range dicts {
+			d, err := loadDict(b)
+			if err != nil {
+				return err
+			}
+			o.dicts = append(o.dicts, *d)
+		}
+		return nil
+	}
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/dict.go b/registry-library/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 00000000..fa25a18d
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,122 @@
+package zstd
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+	id uint32
+
+	litEnc              *huff0.Scratch
+	llDec, ofDec, mlDec sequenceDec
+	//llEnc, ofEnc, mlEnc []*fseEncoder
+	offsets [3]int
+	content []byte
+}
+
+var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+
+// ID returns the dictionary id or 0 if d is nil.
+func (d *dict) ID() uint32 {
+	if d == nil {
+		return 0
+	}
+	return d.id
+}
+
+// DictContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) DictContentSize() int {
+	if d == nil {
+		return 0
+	}
+	return len(d.content)
+}
+
+// Load a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+	// Check static field size.
+	if len(b) <= 8+(3*4) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	d := dict{
+		llDec: sequenceDec{fse: &fseDecoder{}},
+		ofDec: sequenceDec{fse: &fseDecoder{}},
+		mlDec: sequenceDec{fse: &fseDecoder{}},
+	}
+	if !bytes.Equal(b[:4], dictMagic[:]) {
+		return nil, ErrMagicMismatch
+	}
+	d.id = binary.LittleEndian.Uint32(b[4:8])
+	if d.id == 0 {
+		return nil, errors.New("dictionaries cannot have ID 0")
+	}
+
+	// Read literal table
+	var err error
+	d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
+	if err != nil {
+		return nil, err
+	}
+	d.litEnc.Reuse = huff0.ReusePolicyMust
+
+	br := byteReader{
+		b:   b,
+		off: 0,
+	}
+	readDec := func(i tableIndex, dec *fseDecoder) error {
+		if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
+			return err
+		}
+		if br.overread() {
+			return io.ErrUnexpectedEOF
+		}
+		err = dec.transform(symbolTableX[i])
+		if err != nil {
+			println("Transform table error:", err)
+			return err
+		}
+		if debug {
+			println("Read table ok", "symbolLen:", dec.symbolLen)
+		}
+		// Set decoders as predefined so they aren't reused.
+		dec.preDefined = true
+		return nil
+	}
+
+	if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
+		return nil, err
+	}
+	if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
+		return nil, err
+	}
+	if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
+		return nil, err
+	}
+	if br.remain() < 12 {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	d.offsets[0] = int(br.Uint32())
+	br.advance(4)
+	d.offsets[1] = int(br.Uint32())
+	br.advance(4)
+	d.offsets[2] = int(br.Uint32())
+	br.advance(4)
+	if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
+		return nil, errors.New("invalid offset in dictionary")
+	}
+	d.content = make([]byte, br.remain())
+	copy(d.content, br.unread())
+	if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
+		return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
+	}
+
+	return &d, nil
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/enc_base.go b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_base.go
new file mode 100644
index 00000000..2d4d893e
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -0,0 +1,177 @@
+package zstd
+
+import (
+	"fmt"
+	"math/bits"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+	dictShardBits = 6
+)
+
+type fastBase struct {
+	// cur is the offset at the start of hist
+	cur int32
+	// maximum offset. Should be at least 2x block size.
+	maxMatchOff int32
+	hist        []byte
+	crc         *xxhash.Digest
+	tmp         [8]byte
+	blk         *blockEnc
+	lastDictID  uint32
+	lowMem      bool
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+	return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+	crc := e.crc.Sum(e.tmp[:0])
+	dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+	return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or a window size small enough to contain the input size, if > 0.
+func (e *fastBase) WindowSize(size int) int32 {
+	if size > 0 && size < int(e.maxMatchOff) {
+		b := int32(1) << uint(bits.Len(uint(size)))
+		// Keep minimum window.
+		if b < 1024 {
+			b = 1024
+		}
+		return b
+	}
+	return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+	return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+	if debugAsserts && e.cur > bufferReset {
+		panic(fmt.Sprintf("e.cur (%d) > buffer reset (%d)", e.cur, bufferReset))
+	}
+	// Check if we have space already.
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.ensureHist(len(src))
+		} else {
+			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+			}
+			// Move down
+			offset := int32(len(e.hist)) - e.maxMatchOff
+			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:e.maxMatchOff]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+	if cap(e.hist) >= n {
+		return
+	}
+	l := e.maxMatchOff
+	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+		l += maxCompressedBlockSize
+	} else {
+		l += e.maxMatchOff
+	}
+	// Make it at least 1MB.
+	if l < 1<<20 && !e.lowMem {
+		l = 1 << 20
+	}
+	// Make it at least the requested size.
+	if l < int32(n) {
+		l = int32(n)
+	}
+	e.hist = make([]byte, 0, l)
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+	enc.reset(e.blk)
+	e.blk = enc
+}
+
+func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+	if debugAsserts {
+		if s < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if t < 0 {
+			err := fmt.Sprintf("t (%d) < 0", t)
+			panic(err)
+		}
+		if s-t > e.maxMatchOff {
+			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+			panic(err)
+		}
+		if len(src)-int(s) > maxCompressedBlockSize {
+			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+		}
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+	if e.blk == nil {
+		e.blk = &blockEnc{lowMem: e.lowMem}
+		e.blk.init()
+	} else {
+		e.blk.reset(nil)
+	}
+	e.blk.initNewEncode()
+	if e.crc == nil {
+		e.crc = xxhash.New()
+	} else {
+		e.crc.Reset()
+	}
+	if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() {
+		l := e.maxMatchOff*2 + int32(d.DictContentSize())
+		// Make it at least 1MB.
+		if l < 1<<20 {
+			l = 1 << 20
+		}
+		e.hist = make([]byte, 0, l)
+	}
+	// We offset current position so everything will be out of reach.
+	// If above reset line, history will be purged.
+	if e.cur < bufferReset {
+		e.cur += e.maxMatchOff + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+	if d != nil {
+		// Set offsets (currently not used)
+		for i, off := range d.offsets {
+			e.blk.recentOffsets[i] = uint32(off)
+			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+		}
+		// Transfer litEnc.
+		e.blk.dictLitEnc = d.litEnc
+		e.hist = append(e.hist, d.content...)
+	}
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/enc_best.go b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 00000000..fe3625c5
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,487 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math/bits"
+)
+
+const (
+	bestLongTableBits = 20                     // Bits used in the long match table
+	bestLongTableSize = 1 << bestLongTableBits // Size of the table
+
+	// Note: Increasing the short table bits or making the hash shorter
+	// can actually lead to compression degradation since it will 'steal' more from the
+	// long match table and match offsets are quite big.
+	// This greatly depends on the type of input.
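+	// For scale: with 8-byte prevEntry slots, the short table below is about
+	// 0.5 MiB and the long table about 8 MiB per encoder.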
+	bestShortTableBits = 16                      // Bits used in the short match table
+	bestShortTableSize = 1 << bestShortTableBits // Size of the table
+)
+
+// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type bestFastEncoder struct {
+	fastBase
+	table         [bestShortTableSize]prevEntry
+	longTable     [bestLongTableSize]prevEntry
+	dictTable     []prevEntry
+	dictLongTable []prevEntry
+}
+
+// Encode improves compression...
+func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (4)
+		inputMargin            = 8 + 4
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = prevEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = prevEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			v2 := e.table[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.table[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			v2 := e.longTable[i].prev
+			if v < minOff {
+				v = 0
+				v2 = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+				if v2 < minOff {
+					v2 = 0
+				} else {
+					v2 = v2 - e.cur + e.maxMatchOff
+				}
+			}
+			e.longTable[i] = prevEntry{
+				offset: v,
+				prev:   v2,
+			}
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	const kSearchStrength = 10
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+	offset3 := int32(blk.recentOffsets[2])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
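+		// Literal bytes are buffered on the block; the sequence itself only
+		// records how many of them precede its match.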
+ s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + type match struct { + offset int32 + s int32 + length int32 + rep int32 + } + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{offset: offset, s: s} + } + return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + } + + bestOf := func(a, b match) match { + aScore := b.s - a.s + a.length + bScore := a.s - b.s + b.length + if a.rep < 0 { + aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8 + } + if b.rep < 0 { + bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8 + } + if aScore >= bScore { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hash8(cv, bestLongTableBits) + nextHashS := hash4x64(cv, bestShortTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + if canRepeat && best.length < goodEnough { + best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1)) + best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2)) + best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3)) + if best.length > 0 { + best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) + best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) + best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hash8(cv, bestLongTableBits)] + candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)] + + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) + best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
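+			// Backwards extension stops at nextEmit+1, so the sequence always
+			// keeps at least one literal (litLen > 0).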
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hash8(cv0, bestLongTableBits) + h1 := hash4x64(cv0, bestShortTableBits) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hash8(cv0, bestLongTableBits) + h1 := hash4x64(cv0, bestShortTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash4x64(cv, bestShortTableBits) + nextHashL := hash8(cv, bestLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
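+			// The repeat used the second-most-recent offset (repeat code 1
+			// with litLen == 0), which now becomes the most recent one.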
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	blk.recentOffsets[2] = uint32(offset3)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	e.ensureHist(len(src))
+	e.Encode(blk, src)
+}
+
+// Reset will reset and set a dictionary if not nil.
+func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]prevEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = bestShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hash4x64(cv, hashLog)      // 0 -> 4
+			nextHash1 := hash4x64(cv>>8, hashLog)  // 1 -> 5
+			nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6
+			nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7
+			e.dictTable[nextHash] = prevEntry{
+				prev:   e.dictTable[nextHash].offset,
+				offset: i,
+			}
+			e.dictTable[nextHash1] = prevEntry{
+				prev:   e.dictTable[nextHash1].offset,
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = prevEntry{
+				prev:   e.dictTable[nextHash2].offset,
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = prevEntry{
+				prev:   e.dictTable[nextHash3].offset,
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+	}
+
+	// Init or copy dict long table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hash8(cv, bestLongTableBits)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hash8(cv, bestLongTableBits)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+	}
+	// Reset the long table to its initial (dictionary) state.
+	copy(e.longTable[:], e.dictLongTable)
+
+	e.cur = e.maxMatchOff
+	// Reset the short table to its initial (dictionary) state.
+	copy(e.table[:], e.dictTable)
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/enc_better.go b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_better.go
new file mode 100644
index 00000000..c2ce4a2b
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -0,0 +1,1170 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
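+//
+// The "better" encoder keeps a short and a long hash table like dfast, but the
+// long table also remembers the previous entry with the same hash, giving a
+// chain of depth two and more match candidates per position.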
+ +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. 
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+		var matched int32
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hash5(cv, betterShortTableBits)
+			nextHashL := hash8(cv, betterLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			off := s + e.cur
+			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					// Index match start+1 (long) -> s - 1
+					index0 := s + repOff
+					s += length + repOff
+
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hash8(cv0, betterLongTableBits)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+
+				// We deviate from the reference encoder and also check offset 2.
+				// Still slower and not much better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+					// Consider history as well.
+					var seq seq
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					index0 := s + repOff2
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hash8(cv0, betterLongTableBits)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					// Swap offsets
+					offset1, offset2 = offset2, offset1
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := candidateL.offset - e.cur
+			coffsetLP := candidateL.prev - e.cur
+
+			// Check if we have a long match.
+			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetL+8, src) + 8
+				t = coffsetL
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+
+				if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+					// Found a long match, at least 8 bytes.
+					prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+					if prevMatch > matched {
+						matched = prevMatch
+						t = coffsetLP
+					}
+					if debugAsserts && s <= t {
+						panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+					}
+					if debugAsserts && s-t > e.maxMatchOff {
+						panic("s - t >e.maxMatchOff")
+					}
+					if debugMatches {
+						println("long match")
+					}
+				}
+				break
+			}
+
+			// Check if we have a long match on prev.
+			if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+				t = coffsetLP
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			coffsetS := candidateS.offset - e.cur
+
+			// Check if we have a short match.
+			if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, betterLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = candidateL.offset - e.cur
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+					if matchedNext > matched {
+						t = coffsetL
+						s += checkAt
+						matched = matchedNext
+						if debugMatches {
+							println("long match (after short)")
+						}
+						break
+					}
+				}
+
+				// Check prev long...
+ coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
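+		// extraLits counts the trailing literals that follow the last sequence.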
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. 
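+					// The 32-bit compare above verified a 4-byte match against
+					// the repeat offset; build the sequence around it.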
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					// Index match start+1 (long) -> s - 1
+					index0 := s + repOff
+					s += length + repOff
+
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hash8(cv0, betterLongTableBits)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.markLongShardDirty(h0)
+						h1 := hash5(cv1, betterShortTableBits)
+						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						e.markShortShardDirty(h1)
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+
+				// We deviate from the reference encoder and also check offset 2.
+				// Still slower and not much better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+					// Consider history as well.
+					var seq seq
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					index0 := s + repOff2
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hash8(cv0, betterLongTableBits)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.markLongShardDirty(h0)
+						h1 := hash5(cv1, betterShortTableBits)
+						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						e.markShortShardDirty(h1)
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					// Swap offsets
+					offset1, offset2 = offset2, offset1
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := candidateL.offset - e.cur
+			coffsetLP := candidateL.prev - e.cur
+
+			// Check if we have a long match.
+			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+				// Found a long match, at least 8 bytes.
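+				// The 64-bit compare above already verified 8 bytes, so
+				// extension starts at s+8.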
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
+ l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hash5(cv1, betterShortTableBits) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// Reset will reset and set a dictionary if not nil.
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d != nil {
+		panic("betterFastEncoder: Reset with dict")
+	}
+}
+
+// Reset will reset and set a dictionary if not nil.
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]tableEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = betterShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hash5(cv, hashLog)      // 0 -> 4
+			nextHash1 := hash5(cv>>8, hashLog)  // 1 -> 5
+			nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
+			nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
+			e.dictTable[nextHash] = tableEntry{
+				val:    uint32(cv),
+				offset: i,
+			}
+			e.dictTable[nextHash1] = tableEntry{
+				val:    uint32(cv >> 8),
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = tableEntry{
+				val:    uint32(cv >> 16),
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = tableEntry{
+				val:    uint32(cv >> 24),
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Init or copy dict long table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hash8(cv, betterLongTableBits)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hash8(cv, betterLongTableBits)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Reset table to initial state
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.shortTableShardDirty {
+				if e.shortTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterShortTableShardCnt
+		const shardSize = betterShortTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.table[:], e.dictTable)
+			for i := range e.shortTableShardDirty {
+				e.shortTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.shortTableShardDirty {
+				if !e.shortTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+				e.shortTableShardDirty[i] = false
+			}
+		}
+	}
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.longTableShardDirty {
+				if e.longTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterLongTableShardCnt
+		const shardSize = betterLongTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.longTable[:], e.dictLongTable)
+			for i := range e.longTableShardDirty {
+				e.longTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.longTableShardDirty {
+				if !e.longTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+				e.longTableShardDirty[i] = false
+			}
+		}
+	}
+	e.cur = e.maxMatchOff
+	e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+	e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+	e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 00000000..8629d43d
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,1121 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+	dFastLongTableBits = 17                      // Bits used in the long match table
+	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+
+	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+	dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt   // Size of an individual shard
+
+	dFastShortTableBits = tableBits                // Bits used in the short match table
+	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+)
+
+type doubleFastEncoder struct {
+	fastEncoder
+	longTable [dFastLongTableSize]tableEntry
+}
+
+type doubleFastEncoderDict struct {
+	fastEncoderDict
+	longTable           [dFastLongTableSize]tableEntry
+	dictLongTable       []tableEntry
+	longTableShardDirty [dLongTableShardCnt]bool
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.longTable[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes, we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, dFastLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes, we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ if e.cur >= bufferReset {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ for {
+
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+
+ if len(blk.sequences) > 2 {
+ if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, length)
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+ e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ e.table[hash5(cv0, dFastShortTableBits)] = te0
+ e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+ cv = load6432(src, s)
+
+ if len(blk.sequences) <= 2 {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ //l := 4 + e.matchlen(s+4, o2+4, src)
+ l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+
+ // We do not store history, so we must offset e.cur to avoid false matches for next user.
+ if e.cur < bufferReset {
+ e.cur += int32(len(src))
+ }
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = tableEntry{}
+ }
+ e.markAllShardsDirty()
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
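+ // (Table offsets are stored as absolute positions, s + e.cur. Rebasing
+ // them against the new e.cur = e.maxMatchOff keeps recent entries valid,
+ // while anything older than maxMatchOff is zeroed so it can never match.)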
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.longTable[i].offset = v
+ }
+ e.markAllShardsDirty()
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = entry
+ e.markShardDirty(nextHashS)
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, length)
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ longHash1 := hash8(cv0, dFastLongTableBits)
+ longHash2 := hash8(cv1, dFastLongTableBits)
+ e.longTable[longHash1] = te0
+ e.longTable[longHash2] = te1
+ e.markLongShardDirty(longHash1)
+ e.markLongShardDirty(longHash2)
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ hashVal1 := hash5(cv0, dFastShortTableBits)
+ hashVal2 := hash5(cv1, dFastShortTableBits)
+ e.table[hashVal1] = te0
+ e.markShardDirty(hashVal1)
+ e.table[hashVal2] = te1
+ e.markShardDirty(hashVal2)
+
+ cv = load6432(src, s)
+
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = entry
+ e.markShardDirty(nextHashS)
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+ // If we encoded more than 64K, mark all shards dirty.
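+ // (Rationale, as far as the shard tracking goes: with inputs this large,
+ // most table shards will have been touched anyway, so flagging everything
+ // is cheaper than tracking individual shards for the next Reset.)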
+ if len(src) > 64<<10 {
+ e.markAllShardsDirty()
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.fastEncoder.Reset(d, singleBlock)
+ if d != nil {
+ panic("doubleFastEncoder: Reset with dict not supported")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ allDirty := e.allDirty
+ e.fastEncoderDict.Reset(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]tableEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ val: uint32(cv),
+ offset: e.maxMatchOff,
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
+ e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+ // Reset table to initial state
+ e.cur = e.maxMatchOff
+
+ dirtyShardCnt := 0
+ if !allDirty {
+ for i := range e.longTableShardDirty {
+ if e.longTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+
+ if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
+ copy(e.longTable[:], e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ return
+ }
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ e.longTableShardDirty[i] = false
+ }
+}
+
+func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/dLongTableShardSize] = true
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 00000000..ba4a17e1
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,1018 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+)
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
+ tableShardSize = tableSize / tableShardCnt // Size of an individual shard
+ tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ maxMatchLength = 131074
+)
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+type fastEncoder struct {
+ fastBase
+ table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+ fastEncoder
+ dictTable []tableEntry
+ tableShardDirty [tableShardCnt]bool
+ allDirty bool
+}
+
+// Encode mimics functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
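+ // (bufferReset, defined elsewhere in this package, sits close to the
+ // int32 limit; once e.cur passes it, absolute offsets of the form
+ // s + e.cur could overflow, so the tables are rebased or cleared here.)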
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 7
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hash6(cv, hashLog)
+ nextHash2 := hash6(cv>>8, hashLog)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ var length int32
+ // length = 4 + e.matchlen(s+6, repIndex+4, src)
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
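+ // (Zstandard reinterprets repeat-offset codes when a sequence has zero
+ // literals: code 1 then means the second-most-recent offset, and so on.
+ // Stopping the backward extension one byte early sidesteps that remapping.)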
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. 
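+ // (seq.offset uses the convention visible elsewhere in this file: values
+ // 1-3 select the recent/repeat offsets, while literal match distances are
+ // stored as s-t+3.)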
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debug {
+ if len(src) > maxBlockSize {
+ panic("src too big")
+ }
+ }
+
+ // Protect against e.cur wraparound.
+ if e.cur >= bufferReset {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches
+
+ for {
+ nextHash := hash6(cv, hashLog)
+ nextHash2 := hash6(cv>>8, hashLog)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ // length := 4 + e.matchlen(s+6, repIndex+4, src)
+ // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+ var length int32
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + //l := e.matchlenNoHist(s+4, t+4, src) + 4 + // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlenNoHist(s+4, o2+4, src) + // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. 
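+ // (hash6, defined elsewhere in this package, is a multiply-shift hash of
+ // the low 6 bytes of cv; re-inserting the current position here keeps the
+ // table fresh after a long repeat match.)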
+ nextHash := hash6(cv, hashLog)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+ // We do not store history, so we must offset e.cur to avoid false matches for next user.
+ if e.cur < bufferReset {
+ e.cur += int32(len(src))
+ }
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if e.allDirty || len(src) > 32<<10 {
+ e.fastEncoder.Encode(blk, src)
+ e.allDirty = true
+ return
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 7
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. 
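+ // (The block below is e.matchlen manually inlined: it compares 8 bytes at
+ // a time with load64 and XOR; the first nonzero diff pinpoints the first
+ // mismatching byte via bits.TrailingZeros64(diff)>>3.)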
+ //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d != nil {
+ panic("fastEncoder: Reset with dict")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ end := e.maxMatchOff + int32(len(d.content)) - 8
+ for i := e.maxMatchOff; i < end; i += 3 {
+ const hashLog = tableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hash6(cv, hashLog) // 0 -> 5
+ nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6
+ nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ e.cur = e.maxMatchOff
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.tableShardDirty {
+ if e.tableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+
+ const shardCnt = tableShardCnt
+ const shardSize = tableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.table[:], e.dictTable)
+ for i := range e.tableShardDirty {
+ e.tableShardDirty[i] = false
+ }
+ e.allDirty = false
+ return
+ }
+ for i := range e.tableShardDirty {
+ if !e.tableShardDirty[i] {
+ continue
+ }
+
+ copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ e.tableShardDirty[i] = false
+ }
+ e.allDirty = false
+}
+
+func (e *fastEncoderDict) markAllShardsDirty() {
+ e.allDirty = true
+}
+
+func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
+ e.tableShardDirty[entryNum/tableShardSize] = true
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/encoder.go b/registry-library/vendor/github.com/klauspost/compress/zstd/encoder.go
new file mode 100644
index 00000000..6f026509
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -0,0 +1,576 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ rdebug "runtime/debug"
+ "sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+// Encoder provides encoding to Zstandard.
+// An Encoder can be used for either compressing a stream via the
+// io.WriteCloser interface supported by the Encoder or as multiple independent
+// tasks via the EncodeAll function.
+// Smaller encodes are encouraged to use the EncodeAll function.
+// Use NewWriter to create a new instance.
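+//
+// A minimal usage sketch (illustrative only; error handling elided, "out"
+// and "data" are placeholders):
+//
+//	enc, _ := NewWriter(out)
+//	_, _ = enc.Write(data)
+//	_ = enc.Close()
+//
+// For small independent payloads, EncodeAll avoids the streaming overhead;
+// a nil writer is enough when only EncodeAll is used:
+//
+//	enc, _ := NewWriter(nil)
+//	compressed := enc.EncodeAll(data, nil)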
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + s.filling = s.filling[:0] + s.current = s.current[:0] + s.previous = s.previous[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.writeErr = nil +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) + p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. 
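+ // (s.wg tracks the goroutine compressing the current block, while s.wWg
+ // tracks the goroutine entropy-coding and writing the previous one, so
+ // compression and output writing overlap across blocks.)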
+ s.wg.Wait()
+ if s.err != nil {
+ return s.err
+ }
+ if len(s.filling) > e.o.blockSize {
+ return fmt.Errorf("block > maxStoreBlockSize")
+ }
+ if !s.headerWritten {
+ // If we have a single block encode, do a sync compression.
+ if final && len(s.filling) == 0 && !e.o.fullZero {
+ s.headerWritten = true
+ s.fullFrameWritten = true
+ s.eofWritten = true
+ return nil
+ }
+ if final && len(s.filling) > 0 {
+ s.current = e.EncodeAll(s.filling, s.current[:0])
+ var n2 int
+ n2, s.err = s.w.Write(s.current)
+ if s.err != nil {
+ return s.err
+ }
+ s.nWritten += int64(n2)
+ s.current = s.current[:0]
+ s.filling = s.filling[:0]
+ s.headerWritten = true
+ s.fullFrameWritten = true
+ s.eofWritten = true
+ return nil
+ }
+
+ var tmp [maxHeaderSize]byte
+ fh := frameHeader{
+ ContentSize: 0,
+ WindowSize: uint32(s.encoder.WindowSize(0)),
+ SingleSegment: false,
+ Checksum: e.o.crc,
+ DictID: e.o.dict.ID(),
+ }
+
+ dst, err := fh.appendTo(tmp[:0])
+ if err != nil {
+ return err
+ }
+ s.headerWritten = true
+ s.wWg.Wait()
+ var n2 int
+ n2, s.err = s.w.Write(dst)
+ if s.err != nil {
+ return s.err
+ }
+ s.nWritten += int64(n2)
+ }
+ if s.eofWritten {
+ // Ensure we only write it once.
+ final = false
+ }
+
+ if len(s.filling) == 0 {
+ // Final block, but no data.
+ if final {
+ enc := s.encoder
+ blk := enc.Block()
+ blk.reset(nil)
+ blk.last = true
+ blk.encodeRaw(nil)
+ s.wWg.Wait()
+ _, s.err = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ s.eofWritten = true
+ }
+ return s.err
+ }
+
+ // Move blocks forward.
+ s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
+ s.wg.Add(1)
+ go func(src []byte) {
+ if debug {
+ println("Adding block,", len(src), "bytes, final:", final)
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ s.err = fmt.Errorf("panic while encoding: %v", r)
+ rdebug.PrintStack()
+ }
+ s.wg.Done()
+ }()
+ enc := s.encoder
+ blk := enc.Block()
+ enc.Encode(blk, src)
+ blk.last = final
+ if final {
+ s.eofWritten = true
+ }
+ // Wait for pending writes.
+ s.wWg.Wait()
+ if s.writeErr != nil {
+ s.err = s.writeErr
+ return
+ }
+ // Transfer encoders from previous write block.
+ blk.swapEncoders(s.writing)
+ // Transfer recent offsets to next.
+ enc.UseBlock(s.writing)
+ s.writing = blk
+ s.wWg.Add(1)
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r)
+ rdebug.PrintStack()
+ }
+ s.wWg.Done()
+ }()
+ err := errIncompressible
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ }
+ switch err {
+ case errIncompressible:
+ if debug {
+ println("Storing incompressible block as raw")
+ }
+ blk.encodeRaw(src)
+ // In fast mode, we do not transfer offsets, so we don't have to deal with changing the recent offsets.
+ case nil:
+ default:
+ s.writeErr = err
+ return
+ }
+ _, s.writeErr = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ }()
+ }(s.current)
+ return nil
+}
+
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
+//
+// The Copy function uses ReaderFrom if available.
+func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
+ if debug {
+ println("Using ReadFrom")
+ }
+
+ // Flush any current writes.
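+ // (ReadFrom reuses e.state.filling as its read buffer: it is sized to
+ // blockSize, refilled from r, and flushed through nextBlock each time it
+ // fills up.)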
+ if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debug { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + default: + if debug { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + case nil: + } + if len(src) > 0 { + if debug { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below 1MB. 
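+ // (In a single-segment frame the window descriptor is omitted and the
+ // whole input is stored contiguously; the decoder sizes its buffer from
+ // the frame content size instead, which is why this is only attractive
+ // for smallish inputs.)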
+ single := len(src) < 1<<20 && len(src) > MinWindowSize
+ if e.o.single != nil {
+ single = *e.o.single
+ }
+ fh := frameHeader{
+ ContentSize: uint64(len(src)),
+ WindowSize: uint32(enc.WindowSize(len(src))),
+ SingleSegment: single,
+ Checksum: e.o.crc,
+ DictID: e.o.dict.ID(),
+ }
+
+ // If less than 1MB, allocate a buffer up front.
+ if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
+ dst = make([]byte, 0, len(src))
+ }
+ dst, err := fh.appendTo(dst)
+ if err != nil {
+ panic(err)
+ }
+
+ // If we can do everything in one block, prefer that.
+ if len(src) <= maxCompressedBlockSize {
+ enc.Reset(e.o.dict, true)
+ // Slightly faster with no history and everything in one block.
+ if e.o.crc {
+ _, _ = enc.CRC().Write(src)
+ }
+ blk := enc.Block()
+ blk.last = true
+ if e.o.dict == nil {
+ enc.EncodeNoHist(blk, src)
+ } else {
+ enc.Encode(blk, src)
+ }
+
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ err := errIncompressible
+ oldout := blk.output
+ if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
+ // Output directly to dst
+ blk.output = dst
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ }
+
+ switch err {
+ case errIncompressible:
+ if debug {
+ println("Storing incompressible block as raw")
+ }
+ dst = blk.encodeRawTo(dst, src)
+ case nil:
+ dst = blk.output
+ default:
+ panic(err)
+ }
+ blk.output = oldout
+ } else {
+ enc.Reset(e.o.dict, false)
+ blk := enc.Block()
+ for len(src) > 0 {
+ todo := src
+ if len(todo) > e.o.blockSize {
+ todo = todo[:e.o.blockSize]
+ }
+ src = src[len(todo):]
+ if e.o.crc {
+ _, _ = enc.CRC().Write(todo)
+ }
+ blk.pushOffsets()
+ enc.Encode(blk, todo)
+ if len(src) == 0 {
+ blk.last = true
+ }
+ err := errIncompressible
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+ err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+ }
+
+ switch err {
+ case errIncompressible:
+ if debug {
+ println("Storing incompressible block as raw")
+ }
+ dst = blk.encodeRawTo(dst, todo)
+ blk.popOffsets()
+ case nil:
+ dst = append(dst, blk.output...)
+ default:
+ panic(err)
+ }
+ blk.reset(nil)
+ }
+ }
+ if e.o.crc {
+ dst = enc.AppendCRC(dst)
+ }
+ // Add padding with content from crypto/rand.Reader
+ if e.o.pad > 0 {
+ add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+ dst, err = skippableFrame(dst, add, rand.Reader)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return dst
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/registry-library/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 00000000..18a47eb0
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,312 @@
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+ "strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// options retains accumulated state of multiple options.
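+//
+// An illustrative construction with a few of the options defined below
+// ("dst" is a placeholder io.Writer):
+//
+//	w, _ := NewWriter(dst,
+//		WithEncoderLevel(SpeedDefault),
+//		WithEncoderCRC(true),
+//		WithEncoderConcurrency(2))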
+type encoderOptions struct {
+	concurrent      int
+	level           EncoderLevel
+	single          *bool
+	pad             int
+	blockSize       int
+	windowSize      int
+	crc             bool
+	fullZero        bool
+	noEntropy       bool
+	allLitEntropy   bool
+	customWindow    bool
+	customALEntropy bool
+	lowMem          bool
+	dict            *dict
+}
+
+func (o *encoderOptions) setDefault() {
+	*o = encoderOptions{
+		concurrent:    runtime.GOMAXPROCS(0),
+		crc:           true,
+		single:        nil,
+		blockSize:     1 << 16,
+		windowSize:    8 << 20,
+		level:         SpeedDefault,
+		allLitEntropy: true,
+		lowMem:        false,
+	}
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+	switch o.level {
+	case SpeedFastest:
+		if o.dict != nil {
+			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		}
+		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+
+	case SpeedDefault:
+		if o.dict != nil {
+			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+		}
+		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+	case SpeedBetterCompression:
+		if o.dict != nil {
+			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		}
+		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+	case SpeedBestCompression:
+		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+	}
+	panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+	return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithEncoderConcurrency(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level.
+func WithWindowSize(n int) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case n < MinWindowSize:
+			return fmt.Errorf("window size must be at least %d", MinWindowSize)
+		case n > MaxWindowSize:
+			return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+		case (n & (n - 1)) != 0:
+			return errors.New("window size must be a power of 2")
+		}
+
+		o.windowSize = n
+		o.customWindow = true
+		if o.blockSize > o.windowSize {
+			o.blockSize = o.windowSize
+		}
+		return nil
+	}
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			n = 0
+		}
+		if n > 1<<30 {
+			return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+		}
+		o.pad = n
+		return nil
+	}
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+	speedNotSet EncoderLevel = iota
+
+	// SpeedFastest will choose the fastest reasonable compression.
+	// This is roughly equivalent to the fastest Zstandard mode.
+	SpeedFastest
+
+	// SpeedDefault is the default "pretty fast" compression option.
+	// This is roughly equivalent to the default Zstandard mode (level 3).
+	SpeedDefault
+
+	// SpeedBetterCompression will yield better compression than the default.
+	// Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+	// Note that CPU usage of this mode may go up in future versions.
+	SpeedBetterCompression
+
+	// SpeedBestCompression will choose the best available compression option.
+	// This will offer the best compression no matter the CPU cost.
+	SpeedBestCompression
+
+	// speedLast should be kept as the last actual compression option.
+	// This is not for external usage, but is used to keep track of the valid options.
+	speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+	for l := speedNotSet + 1; l < speedLast; l++ {
+		if strings.EqualFold(s, l.String()) {
+			return true, l
+		}
+	}
+	return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return an encoder level that closest matches the compression
+// ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+	switch {
+	case level < 3:
+		return SpeedFastest
+	case level >= 3 && level < 6:
+		return SpeedDefault
+	case level >= 6 && level < 10:
+		return SpeedBetterCompression
+	case level >= 10:
+		return SpeedBestCompression
+	}
+	return SpeedDefault
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+	switch e {
+	case SpeedFastest:
+		return "fastest"
+	case SpeedDefault:
+		return "default"
+	case SpeedBetterCompression:
+		return "better"
+	case SpeedBestCompression:
+		return "best"
+	default:
+		return "invalid"
+	}
+}
+
+// WithEncoderLevel specifies a predefined compression level.
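+// An illustrative sketch (not from the upstream docs) mapping a zstd CLI
+// level to a package level and applying it:
+//
+//	enc, err := NewWriter(w, WithEncoderLevel(EncoderLevelFromZstd(19)))
+//	if err != nil {
+//		// handle the (unlikely) option error
+//	}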
+func WithEncoderLevel(l EncoderLevel) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case l <= speedNotSet || l >= speedLast:
+			return fmt.Errorf("unknown encoder level")
+		}
+		o.level = l
+		if !o.customWindow {
+			switch o.level {
+			case SpeedFastest:
+				o.windowSize = 4 << 20
+			case SpeedDefault:
+				o.windowSize = 8 << 20
+			case SpeedBetterCompression:
+				o.windowSize = 16 << 20
+			case SpeedBestCompression:
+				o.windowSize = 32 << 20
+			}
+		}
+		if !o.customALEntropy {
+			o.allLitEntropy = l > SpeedFastest
+		}
+
+		return nil
+	}
+}
+
+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.fullZero = b
+		return nil
+	}
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but in cases with no matches but
+// a skewed character distribution, compression is lost.
+// The default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.customALEntropy = true
+		o.allLitEntropy = b
+		return nil
+	}
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but is unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.noEntropy = b
+		return nil
+	}
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment equal to or larger than the size of your content.
+// To protect the decoder from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond the decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation; each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.single = &b
+		return nil
+	}
+}
+
+// WithLowerEncoderMem will, in some cases, trade less memory usage for
+// slower encoding speed.
+// This will not change the window size, which is the primary means of reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.lowMem = b
+		return nil
+	}
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+// The encoder *may* choose to use no dictionary instead for certain payloads.
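+//
+// Hypothetical usage (dictBytes would hold a zstd dictionary, e.g. one
+// produced by `zstd --train`; this example is not part of the upstream docs):
+//
+//	enc, err := NewWriter(w, WithEncoderDict(dictBytes))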
+func WithEncoderDict(dict []byte) EOption {
+	return func(o *encoderOptions) error {
+		d, err := loadDict(dict)
+		if err != nil {
+			return err
+		}
+		o.dict = d
+		return nil
+	}
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/framedec.go b/registry-library/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 00000000..fc4a566d
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,494 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"hash"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+	o      decoderOptions
+	crc    hash.Hash64
+	offset int64
+
+	WindowSize uint64
+
+	// maxWindowSize is the maximum window size to support.
+	// Should never be bigger than max-int.
+	maxWindowSize uint64
+
+	// In order queue of blocks being decoded.
+	decoding chan *blockDec
+
+	// Frame history passed between blocks
+	history history
+
+	rawInput byteBuffer
+
+	// Byte buffer that can be reused for small input blocks.
+	bBuf byteBuf
+
+	FrameContentSize uint64
+	frameDone        sync.WaitGroup
+
+	DictionaryID  *uint32
+	HasCheckSum   bool
+	SingleSegment bool
+
+	// asyncRunning indicates whether the async routine processes input on 'decoding'.
+	asyncRunningMu sync.Mutex
+	asyncRunning   bool
+}
+
+const (
+	// The minimum Window_Size is 1 KB.
+	MinWindowSize = 1 << 10
+	MaxWindowSize = 1 << 29
+)
+
+var (
+	frameMagic          = []byte{0x28, 0xb5, 0x2f, 0xfd}
+	skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+	d := frameDec{
+		o:             o,
+		maxWindowSize: MaxWindowSize,
+	}
+	if d.maxWindowSize > o.maxDecodedSize {
+		d.maxWindowSize = o.maxDecodedSize
+	}
+	return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error {
+	d.HasCheckSum = false
+	d.WindowSize = 0
+	var b []byte
+	for {
+		b = br.readSmall(4)
+		if b == nil {
+			return io.EOF
+		}
+		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+			if debug {
+				println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic))
+			}
+			// Break if not skippable frame.
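+			// (Sketch of the wire layout, cf. the zstd format spec linked
+			// throughout this file: a skippable frame starts with magic
+			// 0x184D2A5? - bytes 0x5? 0x2A 0x4D 0x18, low nibble free -
+			// followed by a 4-byte little-endian length that is consumed
+			// by the skip logic below.)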
+ break + } + // Read size to skip + b = br.readSmall(4) + if b == nil { + println("Reading Frame Size EOF") + return io.ErrUnexpectedEOF + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err := br.skipN(int(n)) + if err != nil { + if debug { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(b, frameMagic) { + println("Got magic numbers: ", b, "want:", frameMagic) + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + println("Reading Frame_Header_Descriptor", err) + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + println("Reading Window_Descriptor", err) + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + b = br.readSmall(int(size)) + if b == nil { + if debug { + println("Reading Dictionary_ID", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debug { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = 0 + if fcsSize > 0 { + b := br.readSmall(fcsSize) + if b == nil { + println("Reading Frame content", io.ErrUnexpectedEOF) + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debug { + println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + } + } + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. 
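+		// (Note: with SingleSegment set there is no Window_Descriptor, so the
+		// declared frame content size itself bounds the buffer; e.g. a frame
+		// declaring 4KB of content decodes with a 4KB window, clamped to at
+		// least MinWindowSize just below. Illustrative numbers.)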
+		d.WindowSize = d.FrameContentSize
+		if d.WindowSize < MinWindowSize {
+			d.WindowSize = MinWindowSize
+		}
+	}
+
+	if d.WindowSize > d.maxWindowSize {
+		printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize)
+		return ErrWindowSizeExceeded
+	}
+	// The minimum Window_Size is 1 KB.
+	if d.WindowSize < MinWindowSize {
+		println("got window size: ", d.WindowSize)
+		return ErrWindowSizeTooSmall
+	}
+	d.history.windowSize = int(d.WindowSize)
+	if d.o.lowMem && d.history.windowSize < maxBlockSize {
+		d.history.maxSize = d.history.windowSize * 2
+	} else {
+		d.history.maxSize = d.history.windowSize + maxBlockSize
+	}
+	// history contains input - maybe we do something
+	d.rawInput = br
+	return nil
+}
+
+// next will start decoding the next block from the stream.
+func (d *frameDec) next(block *blockDec) error {
+	if debug {
+		printf("decoding new block %p:%p", block, block.data)
+	}
+	err := block.reset(d.rawInput, d.WindowSize)
+	if err != nil {
+		println("block error:", err)
+		// Signal the frame decoder we have a problem.
+		d.sendErr(block, err)
+		return err
+	}
+	block.input <- struct{}{}
+	if debug {
+		println("next block:", block)
+	}
+	d.asyncRunningMu.Lock()
+	defer d.asyncRunningMu.Unlock()
+	if !d.asyncRunning {
+		return nil
+	}
+	if block.Last {
+		// We indicate the frame is done by sending io.EOF
+		d.decoding <- block
+		return io.EOF
+	}
+	d.decoding <- block
+	return nil
+}
+
+// sendErr will queue an error block on the frame.
+// This will cause the frame decoder to return when it encounters the block.
+// Returns true if the decoder was added.
+func (d *frameDec) sendErr(block *blockDec, err error) bool {
+	d.asyncRunningMu.Lock()
+	defer d.asyncRunningMu.Unlock()
+	if !d.asyncRunning {
+		return false
+	}
+
+	println("sending error", err.Error())
+	block.sendErr(err)
+	d.decoding <- block
+	return true
+}
+
+// checkCRC will check the checksum if the frame has one.
+// Will return ErrCRCMismatch if crc check failed, otherwise nil.
+func (d *frameDec) checkCRC() error {
+	if !d.HasCheckSum {
+		return nil
+	}
+	var tmp [4]byte
+	got := d.crc.Sum64()
+	// Flip to match file order.
+	tmp[0] = byte(got >> 0)
+	tmp[1] = byte(got >> 8)
+	tmp[2] = byte(got >> 16)
+	tmp[3] = byte(got >> 24)
+
+	// We can overwrite upper tmp now
+	want := d.rawInput.readSmall(4)
+	if want == nil {
+		println("CRC missing?")
+		return io.ErrUnexpectedEOF
+	}
+
+	if !bytes.Equal(tmp[:], want) {
+		if debug {
+			println("CRC Check Failed:", tmp[:], "!=", want)
+		}
+		return ErrCRCMismatch
+	}
+	if debug {
+		println("CRC ok", tmp[:])
+	}
+	return nil
+}
+
+func (d *frameDec) initAsync() {
+	if !d.o.lowMem && !d.SingleSegment {
+		// set max extra size history to 10MB.
+		d.history.maxSize = d.history.windowSize + maxBlockSize*5
+	}
+	// re-alloc if more than one extra block size.
+	if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
+		d.history.b = make([]byte, 0, d.history.maxSize)
+	}
+	if cap(d.history.b) < d.history.maxSize {
+		d.history.b = make([]byte, 0, d.history.maxSize)
+	}
+	if cap(d.decoding) < d.o.concurrent {
+		d.decoding = make(chan *blockDec, d.o.concurrent)
+	}
+	if debug {
+		h := d.history
+		printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
+	}
+	d.asyncRunningMu.Lock()
+	d.asyncRunning = true
+	d.asyncRunningMu.Unlock()
+}
+
+// startDecoder will start decoding blocks and write them to the writer.
+// The decoder will stop as soon as an error occurs or at end of frame.
+// When the frame has finished decoding, completion is signalled on
+// frameDec.frameDone.
+func (d *frameDec) startDecoder(output chan decodeOutput) {
+	written := int64(0)
+
+	defer func() {
+		d.asyncRunningMu.Lock()
+		d.asyncRunning = false
+		d.asyncRunningMu.Unlock()
+
+		// Drain the blocks currently decoding.
+		d.history.error = true
+	flushdone:
+		for {
+			select {
+			case b := <-d.decoding:
+				b.history <- &d.history
+				output <- <-b.result
+			default:
+				break flushdone
+			}
+		}
+		println("frame decoder done, signalling done")
+		d.frameDone.Done()
+	}()
+	// Get decoder for first block.
+	block := <-d.decoding
+	block.history <- &d.history
+	for {
+		var next *blockDec
+		// Get result
+		r := <-block.result
+		if r.err != nil {
+			println("Result contained error", r.err)
+			output <- r
+			return
+		}
+		if debug {
+			println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
+			d.offset += int64(len(r.b))
+		}
+		if !block.Last {
+			// Send history to next block
+			select {
+			case next = <-d.decoding:
+				if debug {
+					println("Sending ", len(d.history.b), "bytes as history")
+				}
+				next.history <- &d.history
+			default:
+				// Wait until we have sent the block, so
+				// other decoders can potentially get the decoder.
+				next = nil
+			}
+		}
+
+		// Add checksum, async to decoding.
+		if d.HasCheckSum {
+			n, err := d.crc.Write(r.b)
+			if err != nil {
+				r.err = err
+				if n != len(r.b) {
+					r.err = io.ErrShortWrite
+				}
+				output <- r
+				return
+			}
+		}
+		written += int64(len(r.b))
+		if d.SingleSegment && uint64(written) > d.FrameContentSize {
+			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
+			r.err = ErrFrameSizeExceeded
+			output <- r
+			return
+		}
+		if block.Last {
+			r.err = d.checkCRC()
+			output <- r
+			return
+		}
+		output <- r
+		if next == nil {
+			// There was no decoder available, we wait for one now that we have sent to the writer.
+			if debug {
+				println("Sending ", len(d.history.b), " bytes as history")
+			}
+			next = <-d.decoding
+			next.history <- &d.history
+		}
+		block = next
+	}
+}
+
+// runDecoder will create a sync decoder that will decode a block of data.
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
+	saved := d.history.b
+
+	// We use the history for output to avoid copying it.
+	d.history.b = dst
+	// Store input length, so we only check new data.
+	crcStart := len(dst)
+	var err error
+	for {
+		err = dec.reset(d.rawInput, d.WindowSize)
+		if err != nil {
+			break
+		}
+		if debug {
+			println("next block:", dec)
+		}
+		err = dec.decodeBuf(&d.history)
+		if err != nil || dec.Last {
+			break
+		}
+		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+			err = ErrFrameSizeExceeded
+			break
+		}
+	}
+	dst = d.history.b
+	if err == nil {
+		if d.HasCheckSum {
+			var n int
+			n, err = d.crc.Write(dst[crcStart:])
+			if err == nil {
+				if n != len(dst)-crcStart {
+					err = io.ErrShortWrite
+				} else {
+					err = d.checkCRC()
+				}
+			}
+		}
+	}
+	d.history.b = saved
+	return dst, err
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/frameenc.go b/registry-library/vendor/github.com/klauspost/compress/zstd/frameenc.go
new file mode 100644
index 00000000..4ef7f5a3
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -0,0 +1,137 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"math/bits"
+)
+
+type frameHeader struct {
+	ContentSize   uint64
+	WindowSize    uint32
+	SingleSegment bool
+	Checksum      bool
+	DictID        uint32
+}
+
+const maxHeaderSize = 14
+
+func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+	dst = append(dst, frameMagic...)
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+
+	var dictIDContent []byte
+	if f.DictID > 0 {
+		var tmp [4]byte
+		if f.DictID < 256 {
+			fhd |= 1
+			tmp[0] = uint8(f.DictID)
+			dictIDContent = tmp[:1]
+		} else if f.DictID < 1<<16 {
+			fhd |= 2
+			binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+			dictIDContent = tmp[:2]
+		} else {
+			fhd |= 3
+			binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+			dictIDContent = tmp[:4]
+		}
+	}
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+	if f.DictID > 0 {
+		dst = append(dst, dictIDContent...)
+	}
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return a total size to be added so that written
+// becomes divisible by wantMultiple.
+// A non-zero return value will always be >= skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of 'total' bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
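+	// The region just appended is filled from r below - crypto/rand.Reader
+	// when called from Close/EncodeAll - so the padding bytes are random
+	// rather than a compressible run of zeros (cf. WithEncoderPadding).
+	// Worked example (illustrative): calcSkippableFrame(1000, 512) returns
+	// 24, growing a 1000-byte output to 1024 via the 8-byte header plus 16
+	// random bytes.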
+ _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/registry-library/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 00000000..e6d3d49b --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,385 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. 
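+// The header layout follows the FSE table description in the zstd format
+// spec (https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md):
+// the low 4 bits hold the accuracy log minus minTablelog (5), and the rest of
+// the bitstream carries variable-width normalized counts, where a count of 0
+// switches to run-length coding of further zero-probability symbols.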
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+	var (
+		charnum   uint16
+		previous0 bool
+	)
+	if b.remain() < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32NC()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		println("Invalid tablelog:", nbBits)
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 && charnum <= maxSymbol {
+		if previous0 {
+			//println("prev0")
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				//println("24 x 0")
+				n0 += 24
+				if r := b.remain(); r > 5 {
+					b.advance(2)
+					// The check above should make sure we can read 32 bits
+					bitStream = b.Uint32NC() >> bitCount
+				} else {
+					// end of bit stream
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			//printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			//println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+			for charnum < n0 {
+				s.norm[uint8(charnum)] = 0
+				charnum++
+			}
+
+			if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				// The check above should make sure we can read 32 bits
+				bitStream = b.Uint32NC() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*threshold - 1) - remaining
+		var count int32
+
+		if int32(bitStream)&(threshold-1) < max {
+			count = int32(bitStream) & (threshold - 1)
+			if debugAsserts && nbBits < 1 {
+				panic("nbBits underflow")
+			}
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		// extra accuracy
+		count--
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	// println(s.norm[:s.symbolLen], s.symbolLen)
+	return s.buildDtable()
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
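+//
+// Bit layout, low to high (matching newDecSymbol below):
+//
+//	[0:8]   nbBits   - bits to read for the next state
+//	[8:16]  addBits  - extra bits added to the baseline
+//	[16:32] newState - base of the next state
+//	[32:64] baseline - base value for the decoded symbol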
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baseline() uint32 {
+	return uint32(d >> 32)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
+	*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setBaseline(baseline uint32) {
+	const mask = 0xffffffff
+	*d = (*d & mask) | decSymbol(baseline)<<32
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) {
+	s.actualTableLog = 0
+	s.maxBits = symbol.addBits()
+	s.dt[0] = symbol
+}
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	symbolNext := s.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	{
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.dt[highThreshold].setAddBits(uint8(i))
+				highThreshold--
+				symbolNext[i] = 1
+			} else {
+				symbolNext[i] = uint16(v)
+			}
+		}
+	}
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.dt[position].setAddBits(uint8(ss))
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					// lowprob area
+					position = (position + step) & tableMask
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.dt[:tableSize] {
+			symbol := v.addBits()
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.dt[u&maxTableMask].setNBits(nBits)
+			newState := (nextState << nBits) - tableSize
+			if newState > tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.dt[u&maxTableMask].setNewState(newState)
+		}
+	}
+	return nil
+}
+
+// transform will transform the decoder table into a table usable for
+// decoding without having to apply the transformation while decoding.
+// The state will contain the base value and the number of bits to read.
+func (s *fseDecoder) transform(t []baseOffset) error {
+	tableSize := uint16(1 << s.actualTableLog)
+	s.maxBits = 0
+	for i, v := range s.dt[:tableSize] {
+		add := v.addBits()
+		if int(add) >= len(t) {
+			return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t))
+		}
+		lu := t[add]
+		if lu.addBits > s.maxBits {
+			s.maxBits = lu.addBits
+		}
+		v.setExt(lu.addBits, lu.baseLine)
+		s.dt[i] = v
+	}
+	return nil
+}
+
+type fseState struct {
+	dt    []decSymbol
+	state decSymbol
+}
+
+// init initializes and decodes the first state and symbol.
+func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
+	s.dt = dt
+	br.fill()
+	s.state = dt[br.getBits(tableLog)]
+}
+
+// next returns the current symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) next(br *bitReader) {
+	lowBits := uint16(br.getBits(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (s *fseState) finished(br *bitReader) bool {
+	return br.finished() && s.state.nbBits() > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (s *fseState) final() (int, uint8) {
+	return s.state.baselineInt(), s.state.addBits()
+}
+
+// final returns the current state symbol without decoding the next.
+func (s decSymbol) final() (int, uint8) {
+	return s.baselineInt(), s.addBits()
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
+	lowBits := uint16(br.getBitsFast(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+	return s.state.baseline(), s.state.addBits()
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/registry-library/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 00000000..b80709d5
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,726 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
+const (
+	// For encoding we only support table logs up to maxEncTableLog.
+	maxEncTableLog    = 8
+	maxEncTablesize   = 1 << maxTableLog
+	maxEncTableMask   = (1 << maxTableLog) - 1
+	minEncTablelog    = 5
+	maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// fseEncoder provides temporary storage for compression and decompression.
+type fseEncoder struct {
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	ct             cTable // Compression tables.
+	maxCount       int    // count of the most probable symbol
+	zeroBits       bool   // no symbol has prob > 50%.
+	clearCount     bool   // clear count
+	useRLE         bool   // This encoder is for RLE
+	preDefined     bool   // This encoder is predefined.
+	reUsed         bool   // Set to know when the encoder has been reused.
+	rleVal         uint8  // RLE Symbol
+	maxBits        uint8  // Maximum output bits after transform.
+
+	// TODO: Technically zstd should be fine with 64 bytes.
+	count [256]uint32
+	norm  [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaNbBits    uint32
+	deltaFindState int16
+	outBits        uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram allows populating the histogram and skipping that step in the compression.
+// It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *fseEncoder) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *fseEncoder) prepare() (*fseEncoder, error) {
+	if s == nil {
+		s = &fseEncoder{}
+	}
+	s.useRLE = false
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	return s, nil
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
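+// Symbols are spread with the classic FSE step of
+// tableSize/2 + tableSize/8 + 3 (see tableStep in fse_decoder.go); the step
+// is odd, hence coprime with the power-of-two table size, so the walk visits
+// every slot exactly once before wrapping.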
+func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debug { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. 
+func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. +func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. 
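+// Concretely: normalizeCount falls through to this when the leftover
+// correction would cost the most probable symbol more than half of its own
+// assigned probability (the stillToDistribute check above).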
+func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. 
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined || s.reUsed {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return nil, errors.New("internal error: remaining < 1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	if outP+2 > len(out) {
+		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+	}
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += int((bitCount + 7) / 8)
+
+	if charnum > s.symbolLen {
+		return nil, errors.New("internal error: charnum > s.symbolLen")
+	}
+	return out[:outP], nil
+}
+
+// bitCost returns the approximate symbol cost, as a fractional value, using fixed-point format (accuracyLog fractional bits).
+// note 1 : assumes symbolValue is valid (<= maxSymbolValue)
+// note 2 : if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+	threshold := (minNbBits + 1) << 16
+	if debugAsserts {
+		if !(s.actualTableLog < 16) {
+			panic("!s.actualTableLog < 16")
+		}
+		// ensure enough room for renormalization double shift
+		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+		}
+	}
+	tableSize := uint32(1) << s.actualTableLog
+	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+	// linear interpolation (very approximate)
+	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+	bitMultiplier := uint32(1) << accuracyLog
+	if debugAsserts {
+		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+		}
+		if normalizedDeltaFromThreshold > bitMultiplier {
+			panic("normalizedDeltaFromThreshold > bitMultiplier")
+		}
+	}
+	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// approxSize returns the cost in bits of encoding the distribution in count using ctable.
+// Histogram should only be up to the last non-zero symbol.
+// Returns math.MaxUint32 if ctable cannot represent all the symbols in count.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+	if int(s.symbolLen) < len(hist) {
+		// More symbols than we have.
+		return math.MaxUint32
+	}
+	if s.useRLE {
+		// We will never reuse RLE encoders.
+		return math.MaxUint32
+	}
+	const kAccuracyLog = 8
+	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+	var cost uint32
+	for i, v := range hist {
+		if v == 0 {
+			continue
+		}
+		if s.norm[i] == 0 {
+			return math.MaxUint32
+		}
+		bitCost := s.bitCost(uint8(i), kAccuracyLog)
+		if bitCost > badCost {
+			return math.MaxUint32
+		}
+		cost += v * bitCost
+	}
+	return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not the exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+	if s.preDefined {
+		return 0
+	}
+	if s.useRLE {
+		return 8
+	}
+	return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
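+//
+// Note (general FSE/ANS property, not upstream wording): encoding runs
+// backwards - callers feed symbols last-to-first through encode(), and
+// flush() finally writes the start state - so the decoder can read the
+// bitstream front-to-back.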
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
+	c.bw = bw
+	c.stateTable = ct.stateTable
+	if len(c.stateTable) == 1 {
+		// RLE
+		c.stateTable[0] = uint16(0)
+		c.state = 0
+		return
+	}
+	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+	im := int32((nbBitsOut << 16) - first.deltaNbBits)
+	lu := (im >> nbBitsOut) + int32(first.deltaFindState)
+	c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
+	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+	c.bw.flush32()
+	c.bw.addBits16NC(c.state, tableLog)
+}
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/registry-library/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
new file mode 100644
index 00000000..6c17dc17
--- /dev/null
+++ b/registry-library/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -0,0 +1,158 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math"
+	"sync"
+)
+
+var (
+	// fsePredef are the predefined fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredef [3]fseDecoder
+
+	// fsePredefEnc are the predefined encoders based on fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredefEnc [3]fseEncoder
+
+	// symbolTableX contains the transformations needed for each type as defined in
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	symbolTableX [3][]baseOffset
+
+	// maxTableSymbol is the biggest supported symbol for each table type
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol}
+
+	// bitTables is the bits table for each table.
+	bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]}
+)
+
+type tableIndex uint8
+
+const (
+	// indexes for fsePredef and symbolTableX
+	tableLiteralLengths tableIndex = 0
+	tableOffsets        tableIndex = 1
+	tableMatchLengths   tableIndex = 2
+
+	maxLiteralLengthSymbol = 35
+	maxOffsetLengthSymbol  = 30
+	maxMatchLengthSymbol   = 52
+)
+
+// baseOffset is used for calculating transformations.
+type baseOffset struct {
+	baseLine uint32
+	addBits  uint8
+}
+
+// fillBase will precalculate base offsets with the given bit distributions.
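+// For example (illustrative): fillBase(dst, 16, 1, 1, 2) yields the entries
+// {16,1}, {18,1} and {20,2}, since each base advances by 1<<bit.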
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic(fmt.Sprintf("invalid decoding table, base overflows int32")) + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/hash.go b/registry-library/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 00000000..4a752067 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,77 
@@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. +// l must be >=4 and <=8. Any other value will return hash for 4 bytes. +// h should always be <32. +// Preferably h and l should be a constant. +// FIXME: This does NOT get resolved, if 'mls' is constant, +// so this cannot be used. +func hashLen(u uint64, hashLog, mls uint8) uint32 { + switch mls { + case 5: + return hash5(u, hashLog) + case 6: + return hash6(u, hashLog) + case 7: + return hash7(u, hashLog) + case 8: + return hash8(u, hashLog) + default: + return hash4x64(u, hashLog) + } +} + +// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash3(u uint32, h uint8) uint32 { + return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> ((32 - h) & 31) +} + +// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4x64(u uint64, h uint8) uint32 { + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/history.go b/registry-library/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 00000000..f783e32d --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,89 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. 
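+// b is the sliding window of recently decoded bytes, recentOffsets holds the
+// three zstd repeat offsets, and decoders carries the FSE sequence decoders
+// that a following block may reuse.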
+type history struct { + b []byte + huffTree *huff0.Scratch + recentOffsets [3]int + decoders sequenceDecs + windowSize int + maxSize int + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + h.decoders = sequenceDecs{} + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + } + } + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
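The window management in `history.append` above is worth isolating: when incoming data alone exceeds the window, everything is discarded and only the tail is kept; otherwise old bytes are shifted down just enough to make room. A standalone sketch of the same sliding-window policy (toy helper, not part of the vendored package):

```go
// appendWindow keeps at most window bytes of trailing history in buf.
func appendWindow(buf, b []byte, window int) []byte {
	if len(b) >= window {
		// New data alone fills the window; keep only its tail.
		return append(buf[:0], b[len(b)-window:]...)
	}
	if len(buf)+len(b) <= window {
		// Everything still fits; just append.
		return append(buf, b...)
	}
	// Shift existing history down so buf+b fits exactly in window bytes.
	discard := len(buf) + len(b) - window
	copy(buf, buf[discard:])
	buf = buf[:window-len(b)]
	return append(buf, b...)
}
```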
diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000..69aa3bb5 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,58 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000..426b9cac --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,238 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). 
+var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go new file mode 100644 index 00000000..35318d7c --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(*Digest, []byte) int diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000..2c9c5357 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. 
+ MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ arg1_base+8(FP), CX + MOVQ arg1_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ arg+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ arg1_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000..4a5a8216 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
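+ // The shape mirrors XXH64: inputs of 32+ bytes run four 8-byte lanes
+ // (v1..v4) that are rotated and merged, shorter inputs start from prime5,
+ // and the tail is folded in 8-, 4- and 1-byte steps before the final
+ // avalanche of shifts and multiplies.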
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000..6f3b0cb1 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/seqdec.go b/registry-library/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000..1dd39e63 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,492 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. 
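+// The first FSE state is seeded by reading actualTableLog bits from the stream.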
+func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data\n", seqs-i) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(s.out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size", size) + } + if size > cap(s.out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(s.out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + s.out = append(s.out, s.literals[:ll]...) + s.literals = s.literals[ll:] + out := s.out + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(s.out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + + // we may be in dictionary. 
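+ // dictO locates the match inside the dictionary: the part of the
+ // offset reaching back past out+hist must land within s.dict.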
+ dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + mo -= len(s.dict) - dictO + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(s.out); v > 0 { + // v is the start position in history from end. + start := len(s.hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, s.hist[start:]...) + mo -= v + ml -= v + } else { + out = append(out, s.hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(s.out) - mo + if ml <= len(s.out)-start { + // No overlap + out = append(out, s.out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + s.out = out + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Add final literals + s.out = append(s.out, s.literals...) + return nil +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) update(br *bitReader) { + // Max 8 bits + s.litLengths.state.next(br) + // Max 9 bits + s.matchLengths.state.next(br) + // Max 8 bits + s.offsets.state.next(br) +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) updateAlt(br *bitReader) { + // Update all 3 states at once. Approx 20% faster. 
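+ // The three nbBits counts are summed so the bitstream is read only once;
+ // the combined value is then split back per state with shifts and masks.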
+ a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + + nBits := a.nbBits() + b.nbBits() + c.nbBits() + if nBits == 0 { + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] + return + } + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] + + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] + + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] +} + +// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + return + } + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + return + } + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + return +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} + +// mergeHistory will merge history. +func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { + for i := uint(0); i < 3; i++ { + var sNew, sHist *sequenceDec + switch i { + default: + // same as "case 0": + sNew = &s.litLengths + sHist = &hist.litLengths + case 1: + sNew = &s.offsets + sHist = &hist.offsets + case 2: + sNew = &s.matchLengths + sHist = &hist.matchLengths + } + if sNew.repeat { + if sHist.fse == nil { + return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) + } + continue + } + if sNew.fse == nil { + return nil, fmt.Errorf("sequence stream %d, no fse found", i) + } + if sHist.fse != nil && !sHist.fse.preDefined { + fseDecoderPool.Put(sHist.fse) + } + sHist.fse = sNew.fse + } + return hist, nil +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/seqenc.go b/registry-library/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 00000000..36bcc3cc --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,115 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + return + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
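+// Lengths 0-63 map directly through llCodeTable; longer lengths use the high
+// bit position plus llDeltaCode.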
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/snappy.go b/registry-library/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000..c95fe511 --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,436 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/snappy" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
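+ // Layout per the framing format: a 4-byte masked CRC-32C of the
+ // uncompressed data followed by the snappy-compressed payload.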
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + println("snappy.Decode:", err) + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debug { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debug { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
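+ // The framing format requires treating these as an error rather than
+ // skipping them.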
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/registry-library/vendor/github.com/klauspost/compress/zstd/zstd.go b/registry-library/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000..9056beef --- /dev/null +++ b/registry-library/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,156 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. + ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. 
+ ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug { + log.Printf(format, a...) + } +} + +// matchLenFast does matching, but will not match the last up to 7 bytes. +func matchLenFast(a, b []byte) int { + endI := len(a) & (math.MaxInt32 - 7) + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + bits.TrailingZeros64(diff)>>3 + } + } + return endI +} + +// matchLen returns the maximum length. +// a must be the shortest of the two. +// The function also returns whether all bytes matched. +func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func load64(b []byte, i int) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
+ b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/LICENSE.code b/registry-library/vendor/github.com/moby/locker/LICENSE similarity index 99% rename from registry-library/vendor/github.com/opencontainers/go-digest/LICENSE.code rename to registry-library/vendor/github.com/moby/locker/LICENSE index 0ea3ff81..2e0ec1dc 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/LICENSE.code +++ b/registry-library/vendor/github.com/moby/locker/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ @@ -176,7 +175,7 @@ END OF TERMS AND CONDITIONS - Copyright 2016 Docker, Inc. + Copyright 2013-2018 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/registry-library/vendor/github.com/moby/locker/README.md b/registry-library/vendor/github.com/moby/locker/README.md new file mode 100644 index 00000000..a0852f0f --- /dev/null +++ b/registry-library/vendor/github.com/moby/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/moby/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return i.data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + i.mu.Lock() + i.data[name] = data + i.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state), this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since name lock is already in place, no reads will occur while the modification +is being performed. 
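The README example above discards the error returned by `Unlock`. As a minimal, self-contained sketch of that error path — using only the identifiers the vendored package exports (`New`, `Lock`, `Unlock`, `ErrNoSuchLock`); the lock names are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/moby/locker"
)

func main() {
	l := locker.New()

	l.Lock("resource-1") // the named lock is created on first use
	// ... critical section guarded by the "resource-1" lock ...
	if err := l.Unlock("resource-1"); err != nil {
		fmt.Println("unlock failed:", err)
	}

	// Unlocking a name that was never locked returns ErrNoSuchLock.
	if err := l.Unlock("never-locked"); err == locker.ErrNoSuchLock {
		fmt.Println("no such lock, as expected")
	}
}
```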
+ diff --git a/registry-library/vendor/github.com/moby/locker/go.mod b/registry-library/vendor/github.com/moby/locker/go.mod new file mode 100644 index 00000000..15b7adcd --- /dev/null +++ b/registry-library/vendor/github.com/moby/locker/go.mod @@ -0,0 +1,3 @@ +module github.com/moby/locker + +go 1.13 diff --git a/registry-library/vendor/github.com/moby/locker/locker.go b/registry-library/vendor/github.com/moby/locker/locker.go new file mode 100644 index 00000000..0b22ddfa --- /dev/null +++ b/registry-library/vendor/github.com/moby/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. 
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/.mailmap b/registry-library/vendor/github.com/opencontainers/go-digest/.mailmap index ba611cb2..eaf8b2f9 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/.mailmap +++ b/registry-library/vendor/github.com/opencontainers/go-digest/.mailmap @@ -1 +1,4 @@ +Aaron Lehmann +Derek McGowan Stephen J Day +Haibing Zhou diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/registry-library/vendor/github.com/opencontainers/go-digest/.pullapprove.yml index 45fa4b9e..b6165f83 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/.pullapprove.yml +++ b/registry-library/vendor/github.com/opencontainers/go-digest/.pullapprove.yml @@ -1,12 +1,28 @@ -approve_by_comment: true -approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)' -reject_regex: ^Rejected -reset_on_push: true -author_approval: ignored -signed_off_by: - required: true -reviewers: - teams: - - go-digest-maintainers - name: default +version: 2 + +requirements: + signed_off_by: + required: true + +always_pending: + title_regex: '^WIP' + explanation: 'Work in progress...' + +group_defaults: required: 2 + approve_by_comment: + enabled: true + approve_regex: '^LGTM' + reject_regex: '^Rejected' + reset_on_push: + enabled: true + author_approval: + ignored: true + conditions: + branches: + - master + +groups: + go-digest: + teams: + - go-digest-maintainers diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/.travis.yml b/registry-library/vendor/github.com/opencontainers/go-digest/.travis.yml index 7ea4ed1d..5775f885 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/.travis.yml +++ b/registry-library/vendor/github.com/opencontainers/go-digest/.travis.yml @@ -1,4 +1,5 @@ language: go go: - - 1.7 + - 1.12.x + - 1.13.x - master diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/LICENSE b/registry-library/vendor/github.com/opencontainers/go-digest/LICENSE new file mode 100644 index 00000000..3ac8ab64 --- /dev/null +++ b/registry-library/vendor/github.com/opencontainers/go-digest/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2019, 2020 OCI Contributors + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/registry-library/vendor/github.com/opencontainers/go-digest/MAINTAINERS index 42a29795..843b1b20 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/MAINTAINERS +++ b/registry-library/vendor/github.com/opencontainers/go-digest/MAINTAINERS @@ -1,9 +1,5 @@ -Aaron Lehmann (@aaronlehmann) -Brandon Philips (@philips) -Brendan Burns (@brendandburns) Derek McGowan (@dmcgowan) -Jason Bouzane (@jbouzane) -John Starks (@jstarks) -Jonathan Boulle (@jonboulle) -Stephen Day (@stevvooe) -Vincent Batts (@vbatts) +Stephen Day (@stevvooe) +Vincent Batts (@vbatts) +Akihiro Suda (@AkihiroSuda) +Sebastiaan van Stijn (@thaJeztah) diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/README.md b/registry-library/vendor/github.com/opencontainers/go-digest/README.md index 0f5a0409..a1128720 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/README.md +++ b/registry-library/vendor/github.com/opencontainers/go-digest/README.md @@ -8,20 +8,16 @@ Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) fo # What is a digest? -A digest is just a hash. +A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function). -The most common use case for a digest is to create a content -identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) -systems: +The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems: ```go id := digest.FromBytes([]byte("my content")) ``` -In the example above, the id can be used to uniquely identify -the byte slice "my content". This allows two disparate applications -to agree on a verifiable identifier without having to trust one -another. +In the example above, the id can be used to uniquely identify the byte slice "my content". +This allows two disparate applications to agree on a verifiable identifier without having to trust one another. An identifying digest can be verified, as follows: @@ -31,8 +27,7 @@ if id != digest.FromBytes([]byte("my content")) { } ``` -A `Verifier` type can be used to handle cases where an `io.Reader` -makes more sense: +A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense: ```go rd := getContent() @@ -44,33 +39,28 @@ if !verifier.Verified() { } ``` -Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this -can power a rich, safe, content distribution system. +Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system. # Usage -While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is -considered the best resource, a few important items need to be called -out when using this package. +While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package. -1. Make sure to import the hash implementations into your application - or the package will panic. You should have something like the - following in the main (or other entrypoint) of your application: +1. Make sure to import the hash implementations into your application or the package will panic. 
+ You should have something like the following in the main (or other entrypoint) of your application: ```go import ( _ "crypto/sha256" - _ "crypto/sha512" + _ "crypto/sha512" ) ``` This may seem inconvenient but it allows you replace the hash implementations with others, such as https://github.com/stevvooe/resumable. -2. Even though `digest.Digest` may be assemable as a string, _always_ - verify your input with `digest.Parse` or use `Digest.Validate` - when accepting untrusted input. While there are measures to - avoid common problems, this will ensure you have valid digests - in the rest of your application. +2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input. + While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application. + +3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests. # Stability @@ -80,25 +70,27 @@ As always, before using a package export, read the [godoc](https://godoc.org/git # Contributing -This package is considered fairly complete. It has been in production -in thousands (millions?) of deployments and is fairly battle-hardened. -New additions will be met with skepticism. If you think there is a -missing feature, please file a bug clearly describing the problem and -the alternatives you tried before submitting a PR. +This package is considered fairly complete. +It has been in production in thousands (millions?) of deployments and is fairly battle-hardened. +New additions will be met with skepticism. +If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR. -# Reporting security issues +## Code of Conduct -Please DO NOT file a public issue, instead send your report privately to -security@opencontainers.org. +Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct]. -The maintainers take security seriously. If you discover a security issue, -please bring it to their attention right away! +## Security -If you are reporting a security issue, do not create an issue or file a pull -request on GitHub. Instead, disclose the issue responsibly by sending an email -to security@opencontainers.org (which is inhabited only by the maintainers of -the various OCI projects). +If you find an issue, please follow the [security][security] protocol to report it. # Copyright and license -Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. +Copyright © 2019, 2020 OCI Contributors +Copyright © 2016 Docker, Inc. +All rights reserved, except as follows. +Code is released under the [Apache 2.0 license](LICENSE). +This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). 
+You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. + +[security]: https://github.com/opencontainers/org/blob/master/security +[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/algorithm.go b/registry-library/vendor/github.com/opencontainers/go-digest/algorithm.go index 8813bd26..490951dc 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/algorithm.go +++ b/registry-library/vendor/github.com/opencontainers/go-digest/algorithm.go @@ -1,3 +1,4 @@ +// Copyright 2019, 2020 OCI Contributors // Copyright 2017 Docker, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/digest.go b/registry-library/vendor/github.com/opencontainers/go-digest/digest.go index ad398cba..518b5e71 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/digest.go +++ b/registry-library/vendor/github.com/opencontainers/go-digest/digest.go @@ -1,3 +1,4 @@ +// Copyright 2019, 2020 OCI Contributors // Copyright 2017 Docker, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/digester.go b/registry-library/vendor/github.com/opencontainers/go-digest/digester.go index 36fa2728..ede90775 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/digester.go +++ b/registry-library/vendor/github.com/opencontainers/go-digest/digester.go @@ -1,3 +1,4 @@ +// Copyright 2019, 2020 OCI Contributors // Copyright 2017 Docker, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/doc.go b/registry-library/vendor/github.com/opencontainers/go-digest/doc.go index 491ea1ef..83d3a936 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/doc.go +++ b/registry-library/vendor/github.com/opencontainers/go-digest/doc.go @@ -1,3 +1,4 @@ +// Copyright 2019, 2020 OCI Contributors // Copyright 2017 Docker, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,8 +30,13 @@ // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // -// In this case, the string "sha256" is the algorithm and the hex bytes are -// the "digest". +// The "algorithm" portion defines both the hashing algorithm used to calculate +// the digest and the encoding of the resulting digest, which defaults to "hex" +// if not otherwise specified. Currently, all supported algorithms have their +// digests encoded in hex strings. +// +// In the example above, the string "sha256" is the algorithm and the hex bytes +// are the "digest". 
// // Because the Digest type is simply a string, once a valid Digest is // obtained, comparisons are cheap, quick and simple to express with the diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/go.mod b/registry-library/vendor/github.com/opencontainers/go-digest/go.mod new file mode 100644 index 00000000..cf5d7b1d --- /dev/null +++ b/registry-library/vendor/github.com/opencontainers/go-digest/go.mod @@ -0,0 +1,3 @@ +module github.com/opencontainers/go-digest + +go 1.13 diff --git a/registry-library/vendor/github.com/opencontainers/go-digest/verifiers.go b/registry-library/vendor/github.com/opencontainers/go-digest/verifiers.go index 32125e91..afef506f 100644 --- a/registry-library/vendor/github.com/opencontainers/go-digest/verifiers.go +++ b/registry-library/vendor/github.com/opencontainers/go-digest/verifiers.go @@ -1,3 +1,4 @@ +// Copyright 2019, 2020 OCI Contributors // Copyright 2017 Docker, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/registry-library/vendor/github.com/pelletier/go-toml/.dockerignore b/registry-library/vendor/github.com/pelletier/go-toml/.dockerignore new file mode 100644 index 00000000..7b588347 --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/registry-library/vendor/github.com/pelletier/go-toml/.gitignore b/registry-library/vendor/github.com/pelletier/go-toml/.gitignore index 99e38bbc..e6ba63a5 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/.gitignore +++ b/registry-library/vendor/github.com/pelletier/go-toml/.gitignore @@ -1,2 +1,5 @@ test_program/test_program_bin fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen diff --git a/registry-library/vendor/github.com/pelletier/go-toml/.travis.yml b/registry-library/vendor/github.com/pelletier/go-toml/.travis.yml deleted file mode 100644 index c9fbf304..00000000 --- a/registry-library/vendor/github.com/pelletier/go-toml/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -sudo: false -language: go -go: - - 1.8.x - - 1.9.x - - 1.10.x - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -script: - - if [ -n "$(go fmt ./...)" ]; then exit 1; fi - - ./test.sh - - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git -before_install: - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi -branches: - only: [master] -after_success: - - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN diff --git a/registry-library/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/registry-library/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md new file mode 100644 index 00000000..405c911c --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md @@ -0,0 +1,132 @@ +## Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal of the project is to provide an easy-to-use TOML +implementation for Go that gets the job done and gets out of your way – +dealing with TOML is probably not the central piece of your project. + +As the single maintainer of go-toml, time is scarce. All help, big or +small, is more than welcome! + +### Ask questions + +Any question you may have, somebody else might have it too.
Always feel +free to ask them on the [issues tracker][issues-tracker]. We will try to +answer them as clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and +ask away! + +### Improve the documentation + +The best way to share your knowledge and experience with go-toml is to +improve the documentation. Fix a typo, clarify an interface, add an +example, anything goes! + +The documentation is present in the [README][readme] and throughout the +source code. On release, it gets updated on [GoDoc][godoc]. To make a +change to the documentation, create a pull request with your proposed +changes. For simple changes like that, the easiest way to go is probably +the "Fork this project and edit the file" button on GitHub, displayed at +the top right of the file. Unless it's a trivial change (for example a +typo), provide a little bit of context in your pull request description or +commit message. + +### Report a bug + +Found a bug! Sorry to hear that :(. Help us and others track them down and +fix them by reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on +what to include. When in doubt: add more details! By reducing ambiguity and +providing more information, it decreases back and forth and saves everyone +time. + +### Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +* A short proposal with some POC code is better than a lengthy piece of + text with no code. Code speaks louder than words. +* No backward-incompatible patch will be accepted unless discussed. + Sometimes it's hard, and Go's lack of versioning by default does not + help, but we try not to break people's programs unless we absolutely have + to. +* If you are writing a new feature or extending an existing one, make sure + to write some documentation. +* Bug fixes need to be accompanied with regression tests. +* New code needs to be tested. +* Your commit messages need to explain why the change is needed, even if + already included in the PR description. + +It does sound like a lot, but those best practices are here to save time +overall and continuously improve the quality of the project, which is +something everyone benefits from. + +#### Get started + +The fairly standard code contribution process looks like this: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request]. +4. Review, potentially ask for changes. +5. Merge. You're in! + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! + +#### Run the tests + +You can run tests for go-toml using Go's test tool: `go test ./...`. +When creating a pull request, all tests will be run on Linux on a few Go +versions (Travis CI), and on Windows using the latest Go version +(AppVeyor). + +#### Style + +Try to look around and follow the same format and structure as the rest of +the code. We enforce using `go fmt` on the whole code base. + +--- + +### Maintainers-only + +#### Merge pull request + +Checklist: + +* Passing CI. +* Does not introduce backward-incompatible changes (unless discussed). +* Has relevant doc changes. +* Has relevant unit tests. + +1. Merge using "squash and merge". +2.
Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +#### New release + +1. Go to [releases][releases]. Click on "X commits to master since this + release". +2. Make note of all the changes. Look for backward incompatible changes, + new features, and bug fixes. +3. Pick the new version using the above and semver. +4. Create a [new release][new-release]. +5. Follow the same format as [1.1.0][release-110]. + +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[godoc]: https://godoc.org/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[releases]: https://github.com/pelletier/go-toml/releases +[new-release]: https://github.com/pelletier/go-toml/releases/new +[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/registry-library/vendor/github.com/pelletier/go-toml/Dockerfile b/registry-library/vendor/github.com/pelletier/go-toml/Dockerfile new file mode 100644 index 00000000..fffdb016 --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.12-alpine3.9 as builder +WORKDIR /go/src/github.com/pelletier/go-toml +COPY . . +ENV CGO_ENABLED=0 +ENV GOOS=linux +RUN go install ./... + +FROM scratch +COPY --from=builder /go/bin/tomll /usr/bin/tomll +COPY --from=builder /go/bin/tomljson /usr/bin/tomljson +COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/registry-library/vendor/github.com/pelletier/go-toml/Makefile b/registry-library/vendor/github.com/pelletier/go-toml/Makefile new file mode 100644 index 00000000..9e4503ae --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/Makefile @@ -0,0 +1,29 @@ +export CGO_ENABLED=0 +go := go +go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) +go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) + +out.tools := tomll tomljson jsontoml +out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) +sources := $(wildcard **/*.go) + + +.PHONY: +tools: $(out.tools) + +$(out.tools): $(sources) + GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ + +.PHONY: +dist: $(out.dist) + +$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % + if [ "$(go.goos)" = "windows" ]; then \ + tar -cJf $@ $^.exe; \ + else \ + tar -cJf $@ $^; \ + fi + +.PHONY: +clean: + rm -rf $(out.tools) $(out.dist) diff --git a/registry-library/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/registry-library/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..041cdc4a --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +**Issue:** add link to pelletier/go-toml issue here + +Explanation of what this pull request does. + +More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). 
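The README changes that follow advertise marshaling and unmarshaling to and from data structures. As a brief, hypothetical sketch of the unmarshaling side — the struct, keys, and values here are invented, and only go-toml's exported `Unmarshal` function is assumed:

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

// Config mirrors a small, invented TOML document.
type Config struct {
	Postgres struct {
		User     string `toml:"user"`
		Password string `toml:"password"`
	} `toml:"postgres"`
}

func main() {
	doc := []byte(`
[postgres]
user = "pelletier"
password = "mypassword"
`)
	var cfg Config
	if err := toml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Postgres.User) // pelletier
}
```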
diff --git a/registry-library/vendor/github.com/pelletier/go-toml/README.md b/registry-library/vendor/github.com/pelletier/go-toml/README.md index 0d357acf..6831deb5 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/README.md +++ b/registry-library/vendor/github.com/pelletier/go-toml/README.md @@ -3,13 +3,14 @@ Go library for the [TOML](https://github.com/mojombo/toml) format. This library supports TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) +[v1.0.0-rc.1](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v1.0.0-rc.1.md) [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) [![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml) -[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master) +[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) +[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) [![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) ## Features @@ -17,7 +18,7 @@ Go-toml provides the following features for using data parsed from TOML document * Load TOML documents from files and string data * Easily navigate TOML structure using Tree -* Mashaling and unmarshaling to and from data structures +* Marshaling and unmarshaling to and from data structures * Line & column position data for all parsed elements * [Query support similar to JSON-Path](query/) * Syntax errors contain line and column numbers @@ -73,7 +74,7 @@ Or use a query: q, _ := query.Compile("$..[user,password]") results := q.Execute(config) for ii, item := range results.Values() { - fmt.Println("Query result %d: %v", ii, item) + fmt.Printf("Query result %d: %v\n", ii, item) } ``` @@ -86,7 +87,7 @@ The documentation and additional examples are available at Go-toml provides two handy command line tools: -* `tomll`: Reads TOML files and lint them. +* `tomll`: Reads TOML files and lints them. ``` go install github.com/pelletier/go-toml/cmd/tomll @@ -99,6 +100,30 @@ Go-toml provides two handy command line tools: tomljson --help ``` + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + go install github.com/pelletier/go-toml/cmd/jsontoml + jsontoml --help + ``` + +### Docker image + +Those tools are also available as a Docker image from +[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to +use `tomljson`: + +``` +docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml +``` + +Only master (`latest`) and tagged versions are published to dockerhub. You +can build your own image as usual: + +``` +docker build -t go-toml .
+``` + ## Contribute Feel free to report bugs and patches using GitHub's pull requests system on @@ -107,12 +132,7 @@ much appreciated! ### Run tests -You have to make sure two kind of tests run: - -1. The Go unit tests -2. The TOML examples base - -You can run both of them using `./test.sh`. +`go test ./...` ### Fuzzing diff --git a/registry-library/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/registry-library/vendor/github.com/pelletier/go-toml/azure-pipelines.yml new file mode 100644 index 00000000..ff5376b0 --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/azure-pipelines.yml @@ -0,0 +1,230 @@ +trigger: +- master + +stages: +- stage: fuzzit + displayName: "Run Fuzzit" + dependsOn: [] + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: submit + displayName: "Submit" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: fuzzing + FUZZIT_API_KEY: $(FUZZIT_API_KEY) + +- stage: run_checks + displayName: "Check" + dependsOn: [] + jobs: + - job: fmt + displayName: "fmt" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - task: Go@0 + displayName: "go fmt ./..." + inputs: + command: 'custom' + customCommand: 'fmt' + arguments: './...' + - job: coverage + displayName: "coverage" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - task: Go@0 + displayName: "Generate coverage" + inputs: + command: 'test' + arguments: "-race -coverprofile=coverage.txt -covermode=atomic" + - task: Bash@3 + inputs: + targetType: 'inline' + script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) + - job: benchmark + displayName: "benchmark" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - task: Bash@3 + inputs: + filePath: './benchmark.sh' + arguments: "master $(Build.Repository.Uri)" + + - job: fuzzing + displayName: "fuzzing" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.15" + inputs: + version: "1.15" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . 
${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: local-regression + + - job: go_unit_tests + displayName: "unit tests" + strategy: + matrix: + linux 1.15: + goVersion: '1.15' + imageName: 'ubuntu-latest' + mac 1.15: + goVersion: '1.15' + imageName: 'macOS-latest' + windows 1.15: + goVersion: '1.15' + imageName: 'windows-latest' + linux 1.14: + goVersion: '1.14' + imageName: 'ubuntu-latest' + mac 1.14: + goVersion: '1.14' + imageName: 'macOS-latest' + windows 1.14: + goVersion: '1.14' + imageName: 'windows-latest' + pool: + vmImage: $(imageName) + steps: + - task: GoTool@0 + displayName: "Install Go $(goVersion)" + inputs: + version: $(goVersion) + - task: Go@0 + displayName: "go test ./..." + inputs: + command: 'test' + arguments: './...' +- stage: build_binaries + displayName: "Build binaries" + dependsOn: run_checks + jobs: + - job: build_binary + displayName: "Build binary" + strategy: + matrix: + linux_amd64: + GOOS: linux + GOARCH: amd64 + darwin_amd64: + GOOS: darwin + GOARCH: amd64 + windows_amd64: + GOOS: windows + GOARCH: amd64 + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go" + inputs: + version: 1.15 + - task: Bash@3 + inputs: + targetType: inline + script: "make dist" + env: + go.goos: $(GOOS) + go.goarch: $(GOARCH) + - task: CopyFiles@2 + inputs: + sourceFolder: '$(Build.SourcesDirectory)' + contents: '*.tar.xz' + TargetFolder: '$(Build.ArtifactStagingDirectory)' + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: binaries +- stage: build_binaries_manifest + displayName: "Build binaries manifest" + dependsOn: build_binaries + jobs: + - job: build_manifest + displayName: "Build binaries manifest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: 'current' + downloadType: 'single' + artifactName: 'binaries' + downloadPath: '$(Build.SourcesDirectory)' + - task: Bash@3 + inputs: + targetType: inline + script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: manifest + +- stage: build_docker_image + displayName: "Build Docker image" + dependsOn: run_checks + jobs: + - job: build + displayName: "Build" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + command: 'build' + Dockerfile: 'Dockerfile' + buildContext: '.' + addPipelineData: false + +- stage: publish_docker_image + displayName: "Publish Docker image" + dependsOn: build_docker_image + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: publish + displayName: "Publish" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + containerRegistry: 'DockerHub' + repository: 'pelletier/go-toml' + command: 'buildAndPush' + Dockerfile: 'Dockerfile' + buildContext: '.' 
+ tags: 'latest' diff --git a/registry-library/vendor/github.com/pelletier/go-toml/benchmark.json b/registry-library/vendor/github.com/pelletier/go-toml/benchmark.json deleted file mode 100644 index 86f99c6a..00000000 --- a/registry-library/vendor/github.com/pelletier/go-toml/benchmark.json +++ /dev/null @@ -1,164 +0,0 @@ -{ - "array": { - "key1": [ - 1, - 2, - 3 - ], - "key2": [ - "red", - "yellow", - "green" - ], - "key3": [ - [ - 1, - 2 - ], - [ - 3, - 4, - 5 - ] - ], - "key4": [ - [ - 1, - 2 - ], - [ - "a", - "b", - "c" - ] - ], - "key5": [ - 1, - 2, - 3 - ], - "key6": [ - 1, - 2 - ] - }, - "boolean": { - "False": false, - "True": true - }, - "datetime": { - "key1": "1979-05-27T07:32:00Z", - "key2": "1979-05-27T00:32:00-07:00", - "key3": "1979-05-27T00:32:00.999999-07:00" - }, - "float": { - "both": { - "key": 6.626e-34 - }, - "exponent": { - "key1": 5e+22, - "key2": 1000000, - "key3": -0.02 - }, - "fractional": { - "key1": 1, - "key2": 3.1415, - "key3": -0.01 - }, - "underscores": { - "key1": 9224617.445991227, - "key2": 1e+100 - } - }, - "fruit": [{ - "name": "apple", - "physical": { - "color": "red", - "shape": "round" - }, - "variety": [{ - "name": "red delicious" - }, - { - "name": "granny smith" - } - ] - }, - { - "name": "banana", - "variety": [{ - "name": "plantain" - }] - } - ], - "integer": { - "key1": 99, - "key2": 42, - "key3": 0, - "key4": -17, - "underscores": { - "key1": 1000, - "key2": 5349221, - "key3": 12345 - } - }, - "products": [{ - "name": "Hammer", - "sku": 738594937 - }, - {}, - { - "color": "gray", - "name": "Nail", - "sku": 284758393 - } - ], - "string": { - "basic": { - "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." - }, - "literal": { - "multiline": { - "lines": "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n", - "regex2": "I [dw]on't need \\d{2} apples" - }, - "quoted": "Tom \"Dubs\" Preston-Werner", - "regex": "\u003c\\i\\c*\\s*\u003e", - "winpath": "C:\\Users\\nodejs\\templates", - "winpath2": "\\\\ServerX\\admin$\\system32\\" - }, - "multiline": { - "continued": { - "key1": "The quick brown fox jumps over the lazy dog.", - "key2": "The quick brown fox jumps over the lazy dog.", - "key3": "The quick brown fox jumps over the lazy dog." - }, - "key1": "One\nTwo", - "key2": "One\nTwo", - "key3": "One\nTwo" - } - }, - "table": { - "inline": { - "name": { - "first": "Tom", - "last": "Preston-Werner" - }, - "point": { - "x": 1, - "y": 2 - } - }, - "key": "value", - "subtable": { - "key": "another value" - } - }, - "x": { - "y": { - "z": { - "w": {} - } - } - } -} diff --git a/registry-library/vendor/github.com/pelletier/go-toml/benchmark.sh b/registry-library/vendor/github.com/pelletier/go-toml/benchmark.sh index 8b8bb528..a69d3040 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/benchmark.sh +++ b/registry-library/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -e +set -ex reference_ref=${1:-master} reference_git=${2:-.} @@ -8,7 +8,6 @@ reference_git=${2:-.} if ! `hash benchstat 2>/dev/null`; then echo "Installing benchstat" go get golang.org/x/perf/cmd/benchstat - go install golang.org/x/perf/cmd/benchstat fi tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` @@ -21,12 +20,16 @@ git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null pushd ${ref_tempdir} >/dev/null git checkout ${reference_ref} >/dev/null 2>/dev/null go test -bench=. -benchmem | tee ${ref_benchmark} +cd benchmark +go test -bench=. 
-benchmem | tee -a ${ref_benchmark} popd >/dev/null echo "" echo "=== local" go test -bench=. -benchmem | tee ${local_benchmark} +cd benchmark +go test -bench=. -benchmem | tee -a ${local_benchmark} echo "" echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} \ No newline at end of file +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/registry-library/vendor/github.com/pelletier/go-toml/benchmark.toml b/registry-library/vendor/github.com/pelletier/go-toml/benchmark.toml deleted file mode 100644 index dfd77e09..00000000 --- a/registry-library/vendor/github.com/pelletier/go-toml/benchmark.toml +++ /dev/null @@ -1,244 +0,0 @@ -################################################################################ -## Comment - -# Speak your mind with the hash symbol. They go from the symbol to the end of -# the line. - - -################################################################################ -## Table - -# Tables (also known as hash tables or dictionaries) are collections of -# key/value pairs. They appear in square brackets on a line by themselves. - -[table] - -key = "value" # Yeah, you can do this. - -# Nested tables are denoted by table names with dots in them. Name your tables -# whatever crap you please, just don't use #, ., [ or ]. - -[table.subtable] - -key = "another value" - -# You don't need to specify all the super-tables if you don't want to. TOML -# knows how to do it for you. - -# [x] you -# [x.y] don't -# [x.y.z] need these -[x.y.z.w] # for this to work - - -################################################################################ -## Inline Table - -# Inline tables provide a more compact syntax for expressing tables. They are -# especially useful for grouped data that can otherwise quickly become verbose. -# Inline tables are enclosed in curly braces `{` and `}`. No newlines are -# allowed between the curly braces unless they are valid within a value. - -[table.inline] - -name = { first = "Tom", last = "Preston-Werner" } -point = { x = 1, y = 2 } - - -################################################################################ -## String - -# There are four ways to express strings: basic, multi-line basic, literal, and -# multi-line literal. All strings must contain only valid UTF-8 characters. - -[string.basic] - -basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF." - -[string.multiline] - -# The following strings are byte-for-byte equivalent: -key1 = "One\nTwo" -key2 = """One\nTwo""" -key3 = """ -One -Two""" - -[string.multiline.continued] - -# The following strings are byte-for-byte equivalent: -key1 = "The quick brown fox jumps over the lazy dog." - -key2 = """ -The quick brown \ - - - fox jumps over \ - the lazy dog.""" - -key3 = """\ - The quick brown \ - fox jumps over \ - the lazy dog.\ - """ - -[string.literal] - -# What you see is what you get. -winpath = 'C:\Users\nodejs\templates' -winpath2 = '\\ServerX\admin$\system32\' -quoted = 'Tom "Dubs" Preston-Werner' -regex = '<\i\c*\s*>' - - -[string.literal.multiline] - -regex2 = '''I [dw]on't need \d{2} apples''' -lines = ''' -The first newline is -trimmed in raw strings. - All other whitespace - is preserved. -''' - - -################################################################################ -## Integer - -# Integers are whole numbers. Positive numbers may be prefixed with a plus sign. -# Negative numbers are prefixed with a minus sign. 
- -[integer] - -key1 = +99 -key2 = 42 -key3 = 0 -key4 = -17 - -[integer.underscores] - -# For large numbers, you may use underscores to enhance readability. Each -# underscore must be surrounded by at least one digit. -key1 = 1_000 -key2 = 5_349_221 -key3 = 1_2_3_4_5 # valid but inadvisable - - -################################################################################ -## Float - -# A float consists of an integer part (which may be prefixed with a plus or -# minus sign) followed by a fractional part and/or an exponent part. - -[float.fractional] - -key1 = +1.0 -key2 = 3.1415 -key3 = -0.01 - -[float.exponent] - -key1 = 5e+22 -key2 = 1e6 -key3 = -2E-2 - -[float.both] - -key = 6.626e-34 - -[float.underscores] - -key1 = 9_224_617.445_991_228_313 -key2 = 1e1_00 - - -################################################################################ -## Boolean - -# Booleans are just the tokens you're used to. Always lowercase. - -[boolean] - -True = true -False = false - - -################################################################################ -## Datetime - -# Datetimes are RFC 3339 dates. - -[datetime] - -key1 = 1979-05-27T07:32:00Z -key2 = 1979-05-27T00:32:00-07:00 -key3 = 1979-05-27T00:32:00.999999-07:00 - - -################################################################################ -## Array - -# Arrays are square brackets with other primitives inside. Whitespace is -# ignored. Elements are separated by commas. Data types may not be mixed. - -[array] - -key1 = [ 1, 2, 3 ] -key2 = [ "red", "yellow", "green" ] -key3 = [ [ 1, 2 ], [3, 4, 5] ] -#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok - -# Arrays can also be multiline. So in addition to ignoring whitespace, arrays -# also ignore newlines between the brackets. Terminating commas are ok before -# the closing bracket. - -key5 = [ - 1, 2, 3 -] -key6 = [ - 1, - 2, # this is ok -] - - -################################################################################ -## Array of Tables - -# These can be expressed by using a table name in double brackets. Each table -# with the same double bracketed name will be an element in the array. The -# tables are inserted in the order encountered. - -[[products]] - -name = "Hammer" -sku = 738594937 - -[[products]] - -[[products]] - -name = "Nail" -sku = 284758393 -color = "gray" - - -# You can create nested arrays of tables as well. 
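
As an orientation aside (not part of the patch): the fixture above ends with TOML's array-of-tables syntax, and a minimal sketch may help show how go-toml's Unmarshal, which appears later in this patch, maps a [[products]] block onto a Go slice. The Product type and the anonymous wrapper struct are invented for illustration.

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

type Product struct {
	Name  string `toml:"name"`
	SKU   int64  `toml:"sku"`
	Color string `toml:"color"`
}

func main() {
	doc := []byte(`
[[products]]
name = "Hammer"
sku = 738594937

[[products]]

[[products]]
name = "Nail"
sku = 284758393
color = "gray"
`)
	var out struct {
		Products []Product `toml:"products"`
	}
	if err := toml.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	// The empty [[products]] table decodes to a zero-valued Product.
	fmt.Printf("%+v\n", out.Products)
}
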
- -[[fruit]] - name = "apple" - - [fruit.physical] - color = "red" - shape = "round" - - [[fruit.variety]] - name = "red delicious" - - [[fruit.variety]] - name = "granny smith" - -[[fruit]] - name = "banana" - - [[fruit.variety]] - name = "plantain" diff --git a/registry-library/vendor/github.com/pelletier/go-toml/benchmark.yml b/registry-library/vendor/github.com/pelletier/go-toml/benchmark.yml deleted file mode 100644 index 0bd19f08..00000000 --- a/registry-library/vendor/github.com/pelletier/go-toml/benchmark.yml +++ /dev/null @@ -1,121 +0,0 @@ ---- -array: - key1: - - 1 - - 2 - - 3 - key2: - - red - - yellow - - green - key3: - - - 1 - - 2 - - - 3 - - 4 - - 5 - key4: - - - 1 - - 2 - - - a - - b - - c - key5: - - 1 - - 2 - - 3 - key6: - - 1 - - 2 -boolean: - 'False': false - 'True': true -datetime: - key1: '1979-05-27T07:32:00Z' - key2: '1979-05-27T00:32:00-07:00' - key3: '1979-05-27T00:32:00.999999-07:00' -float: - both: - key: 6.626e-34 - exponent: - key1: 5.0e+22 - key2: 1000000 - key3: -0.02 - fractional: - key1: 1 - key2: 3.1415 - key3: -0.01 - underscores: - key1: 9224617.445991227 - key2: 1.0e+100 -fruit: -- name: apple - physical: - color: red - shape: round - variety: - - name: red delicious - - name: granny smith -- name: banana - variety: - - name: plantain -integer: - key1: 99 - key2: 42 - key3: 0 - key4: -17 - underscores: - key1: 1000 - key2: 5349221 - key3: 12345 -products: -- name: Hammer - sku: 738594937 -- {} -- color: gray - name: Nail - sku: 284758393 -string: - basic: - basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." - literal: - multiline: - lines: | - The first newline is - trimmed in raw strings. - All other whitespace - is preserved. - regex2: I [dw]on't need \d{2} apples - quoted: Tom "Dubs" Preston-Werner - regex: "<\\i\\c*\\s*>" - winpath: C:\Users\nodejs\templates - winpath2: "\\\\ServerX\\admin$\\system32\\" - multiline: - continued: - key1: The quick brown fox jumps over the lazy dog. - key2: The quick brown fox jumps over the lazy dog. - key3: The quick brown fox jumps over the lazy dog. - key1: |- - One - Two - key2: |- - One - Two - key3: |- - One - Two -table: - inline: - name: - first: Tom - last: Preston-Werner - point: - x: 1 - y: 2 - key: value - subtable: - key: another value -x: - y: - z: - w: {} diff --git a/registry-library/vendor/github.com/pelletier/go-toml/doc.go b/registry-library/vendor/github.com/pelletier/go-toml/doc.go index d5fd98c0..a1406a32 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/doc.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/doc.go @@ -1,7 +1,7 @@ // Package toml is a TOML parser and manipulation library. 
// // This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md // // Marshaling // diff --git a/registry-library/vendor/github.com/pelletier/go-toml/example-crlf.toml b/registry-library/vendor/github.com/pelletier/go-toml/example-crlf.toml index 12950a16..780d9c68 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ b/registry-library/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -27,3 +27,4 @@ enabled = true [clients] data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/registry-library/vendor/github.com/pelletier/go-toml/example.toml b/registry-library/vendor/github.com/pelletier/go-toml/example.toml index 3d902f28..f45bf88b 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/example.toml +++ b/registry-library/vendor/github.com/pelletier/go-toml/example.toml @@ -27,3 +27,4 @@ enabled = true [clients] data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/registry-library/vendor/github.com/pelletier/go-toml/fuzzit.sh b/registry-library/vendor/github.com/pelletier/go-toml/fuzzit.sh new file mode 100644 index 00000000..b575a608 --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/fuzzit.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -xe + +# go-fuzz doesn't support modules yet, so ensure we do everything +# in the old style GOPATH way +export GO111MODULE="off" + +# install go-fuzz +go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build + +# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-) +# to add another target, make sure to create it with `fuzzit create target` +# before using `fuzzit create job` +TARGET=toml-fuzzer + +go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml +clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET} + +# install fuzzit for talking to fuzzit.dev service +# or latest version: +# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64 +wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64 +chmod a+x fuzzit + +# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there +./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET} diff --git a/registry-library/vendor/github.com/pelletier/go-toml/go.mod b/registry-library/vendor/github.com/pelletier/go-toml/go.mod new file mode 100644 index 00000000..e924cb90 --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/go.mod @@ -0,0 +1,5 @@ +module github.com/pelletier/go-toml + +go 1.12 + +require github.com/davecgh/go-spew v1.1.1 diff --git a/registry-library/vendor/github.com/pelletier/go-toml/go.sum b/registry-library/vendor/github.com/pelletier/go-toml/go.sum new file mode 100644 index 00000000..6f356470 --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/go.sum @@ -0,0 +1,19 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/registry-library/vendor/github.com/pelletier/go-toml/keysparsing.go b/registry-library/vendor/github.com/pelletier/go-toml/keysparsing.go index 284db646..e091500b 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/keysparsing.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -3,83 +3,110 @@ package toml import ( - "bytes" "errors" "fmt" - "unicode" ) // Convert the bare key group string to an array. -// The input supports double quotation to allow "." inside the key name, +// The input supports double quotation and single quotation, // but escape sequences are not supported. Lexers must unescape them beforehand. func parseKey(key string) ([]string, error) { - groups := []string{} - var buffer bytes.Buffer - inQuotes := false - wasInQuotes := false - ignoreSpace := true - expectDot := false + runes := []rune(key) + var groups []string - for _, char := range key { - if ignoreSpace { - if char == ' ' { - continue - } - ignoreSpace = false + if len(key) == 0 { + return nil, errors.New("empty key") + } + + idx := 0 + for idx < len(runes) { + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip leading whitespace } - switch char { - case '"': - if inQuotes { - groups = append(groups, buffer.String()) - buffer.Reset() - wasInQuotes = true - } - inQuotes = !inQuotes - expectDot = false - case '.': - if inQuotes { - buffer.WriteRune(char) - } else { - if !wasInQuotes { - if buffer.Len() == 0 { - return nil, errors.New("empty table key") + if idx >= len(runes) { + break + } + r := runes[idx] + if isValidBareChar(r) { + // parse bare key + startIdx := idx + endIdx := -1 + idx++ + for idx < len(runes) { + r = runes[idx] + if isValidBareChar(r) { + idx++ + } else if r == '.' { + endIdx = idx + break + } else if isSpace(r) { + endIdx = idx + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip trailing whitespace } - groups = append(groups, buffer.String()) - buffer.Reset() + if idx < len(runes) && runes[idx] != '.' 
{ + return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) + } + break + } else { + return nil, fmt.Errorf("invalid bare key character: %c", r) + } + } + if endIdx == -1 { + endIdx = idx + } + groups = append(groups, string(runes[startIdx:endIdx])) + } else if r == '\'' { + // parse single quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed single-quoted key") } - ignoreSpace = true - expectDot = false - wasInQuotes = false + r = runes[idx] + if r == '\'' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ } - case ' ': - if inQuotes { - buffer.WriteRune(char) - } else { - expectDot = true + } else if r == '"' { + // parse double quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed double-quoted key") + } + r = runes[idx] + if r == '"' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ } - default: - if !inQuotes && !isValidBareChar(char) { - return nil, fmt.Errorf("invalid bare character: %c", char) + } else if r == '.' { + idx++ + if idx >= len(runes) { + return nil, fmt.Errorf("unexpected end of key") } - if !inQuotes && expectDot { - return nil, errors.New("what?") + r = runes[idx] + if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { + return nil, fmt.Errorf("expecting key part after dot") } - buffer.WriteRune(char) - expectDot = false + } else { + return nil, fmt.Errorf("invalid key character: %c", r) } } - if inQuotes { - return nil, errors.New("mismatched quotes") - } - if buffer.Len() > 0 { - groups = append(groups, buffer.String()) - } if len(groups) == 0 { - return nil, errors.New("empty key") + return nil, fmt.Errorf("empty key") } return groups, nil } func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r) + return isAlphanumeric(r) || r == '-' || isDigit(r) } diff --git a/registry-library/vendor/github.com/pelletier/go-toml/lexer.go b/registry-library/vendor/github.com/pelletier/go-toml/lexer.go index d11de428..b1886192 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/lexer.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/lexer.go @@ -26,7 +26,7 @@ type tomlLexer struct { currentTokenStart int currentTokenStop int tokens []token - depth int + brackets []rune line int col int endbufferLine int @@ -123,6 +123,8 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn { for { next := l.peek() switch next { + case '}': // after '{' + return l.lexRightCurlyBrace case '[': return l.lexTableKey case '#': @@ -140,10 +142,6 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn { l.skip() } - if l.depth > 0 { - return l.lexRvalue - } - if isKeyStartChar(next) { return l.lexKey } @@ -167,10 +165,8 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { case '=': return l.lexEqual case '[': - l.depth++ return l.lexLeftBracket case ']': - l.depth-- return l.lexRightBracket case '{': return l.lexLeftCurlyBrace @@ -188,12 +184,10 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { fallthrough case '\n': l.skip() - if l.depth == 0 { - return l.lexVoid + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { + return l.lexRvalue } - return l.lexRvalue - case '_': - return l.errorf("cannot start number with underscore") + return l.lexVoid } if l.follow("true") { @@ -223,9 +217,12 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { } possibleDate := l.peekString(35) - dateMatch := dateRegexp.FindString(possibleDate) - if 
dateMatch != "" { - l.fastForward(len(dateMatch)) + dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate) + if dateSubmatches != nil && dateSubmatches[0] != "" { + l.fastForward(len(dateSubmatches[0])) + if dateSubmatches[2] == "" { // no timezone information => local date + return l.lexLocalDate + } return l.lexDate } @@ -233,10 +230,6 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { return l.lexNumber } - if isAlphanumeric(next) { - return l.lexKey - } - return l.errorf("no value can start with %c", next) } @@ -247,12 +240,17 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenLeftCurlyBrace) - return l.lexRvalue + l.brackets = append(l.brackets, '{') + return l.lexVoid } func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenRightCurlyBrace) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { + return l.errorf("cannot have '}' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] return l.lexRvalue } @@ -261,6 +259,11 @@ func (l *tomlLexer) lexDate() tomlLexStateFn { return l.lexRvalue } +func (l *tomlLexer) lexLocalDate() tomlLexStateFn { + l.emit(tokenLocalDate) + return l.lexRvalue +} + func (l *tomlLexer) lexTrue() tomlLexStateFn { l.fastForward(4) l.emit(tokenTrue) @@ -294,13 +297,16 @@ func (l *tomlLexer) lexEqual() tomlLexStateFn { func (l *tomlLexer) lexComma() tomlLexStateFn { l.next() l.emit(tokenComma) + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { + return l.lexVoid + } return l.lexRvalue } // Parse the key and emits its value without escape sequences. // bare keys, basic string keys and literal string keys are supported. func (l *tomlLexer) lexKey() tomlLexStateFn { - growingString := "" + var sb strings.Builder for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { if r == '"' { @@ -309,7 +315,9 @@ func (l *tomlLexer) lexKey() tomlLexStateFn { if err != nil { return l.errorf(err.Error()) } - growingString += str + sb.WriteString("\"") + sb.WriteString(str) + sb.WriteString("\"") l.next() continue } else if r == '\'' { @@ -318,20 +326,45 @@ func (l *tomlLexer) lexKey() tomlLexStateFn { if err != nil { return l.errorf(err.Error()) } - growingString += str + sb.WriteString("'") + sb.WriteString(str) + sb.WriteString("'") l.next() continue } else if r == '\n' { return l.errorf("keys cannot contain new lines") } else if isSpace(r) { - break + var str strings.Builder + str.WriteString(" ") + + // skip trailing whitespace + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + // break loop if not a dot + if r != '.' { + break + } + str.WriteString(".") + // skip trailing whitespace after dot + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + sb.WriteString(str.String()) + continue + } else if r == '.' 
{ + // skip } else if !isValidBareChar(r) { return l.errorf("keys cannot contain %c character", r) } - growingString += string(r) + sb.WriteRune(r) l.next() } - l.emitWithValue(tokenKey, growingString) + l.emitWithValue(tokenKey, sb.String()) return l.lexVoid } @@ -351,11 +384,12 @@ func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { l.next() l.emit(tokenLeftBracket) + l.brackets = append(l.brackets, '[') return l.lexRvalue } func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - growingString := "" + var sb strings.Builder if discardLeadingNewLine { if l.follow("\r\n") { @@ -369,14 +403,14 @@ func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNe // find end of string for { if l.follow(terminator) { - return growingString, nil + return sb.String(), nil } next := l.peek() if next == eof { break } - growingString += string(l.next()) + sb.WriteRune(l.next()) } return "", errors.New("unclosed string") @@ -410,7 +444,7 @@ func (l *tomlLexer) lexLiteralString() tomlLexStateFn { // Terminator is the substring indicating the end of the token. // The resulting string does not include the terminator. func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - growingString := "" + var sb strings.Builder if discardLeadingNewLine { if l.follow("\r\n") { @@ -423,7 +457,7 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, for { if l.follow(terminator) { - return growingString, nil + return sb.String(), nil } if l.follow("\\") { @@ -441,72 +475,72 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, l.next() } case '"': - growingString += "\"" + sb.WriteString("\"") l.next() case 'n': - growingString += "\n" + sb.WriteString("\n") l.next() case 'b': - growingString += "\b" + sb.WriteString("\b") l.next() case 'f': - growingString += "\f" + sb.WriteString("\f") l.next() case '/': - growingString += "/" + sb.WriteString("/") l.next() case 't': - growingString += "\t" + sb.WriteString("\t") l.next() case 'r': - growingString += "\r" + sb.WriteString("\r") l.next() case '\\': - growingString += "\\" + sb.WriteString("\\") l.next() case 'u': l.next() - code := "" + var code strings.Builder for i := 0; i < 4; i++ { c := l.peek() if !isHexDigit(c) { return "", errors.New("unfinished unicode escape") } l.next() - code = code + string(c) + code.WriteRune(c) } - intcode, err := strconv.ParseInt(code, 16, 32) + intcode, err := strconv.ParseInt(code.String(), 16, 32) if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code) + return "", errors.New("invalid unicode escape: \\u" + code.String()) } - growingString += string(rune(intcode)) + sb.WriteRune(rune(intcode)) case 'U': l.next() - code := "" + var code strings.Builder for i := 0; i < 8; i++ { c := l.peek() if !isHexDigit(c) { return "", errors.New("unfinished unicode escape") } l.next() - code = code + string(c) + code.WriteRune(c) } - intcode, err := strconv.ParseInt(code, 16, 64) + intcode, err := strconv.ParseInt(code.String(), 16, 64) if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code) + return "", errors.New("invalid unicode escape: \\U" + code.String()) } - growingString += string(rune(intcode)) + sb.WriteRune(rune(intcode)) default: return "", errors.New("invalid escape sequence: \\" + string(l.peek())) } } else { r := l.peek() 
- if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) { + if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { return "", fmt.Errorf("unescaped control character %U", r) } l.next() - growingString += string(r) + sb.WriteRune(r) } if l.peek() == eof { @@ -533,7 +567,6 @@ func (l *tomlLexer) lexString() tomlLexStateFn { } str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { return l.errorf(err.Error()) } @@ -605,6 +638,10 @@ func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { func (l *tomlLexer) lexRightBracket() tomlLexStateFn { l.next() l.emit(tokenRightBracket) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { + return l.errorf("cannot have ']' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] return l.lexRvalue } @@ -731,7 +768,27 @@ func (l *tomlLexer) run() { } func init() { - dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) + // Regexp for all date/time formats supported by TOML. + // Group 1: nano precision + // Group 2: timezone + // + // /!\ also matches the empty string + // + // Example matches: + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + // 07:32:00 + // 00:32:00.999999 + dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`) } // Entry point diff --git a/registry-library/vendor/github.com/pelletier/go-toml/localtime.go b/registry-library/vendor/github.com/pelletier/go-toml/localtime.go new file mode 100644 index 00000000..a2149e96 --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/localtime.go @@ -0,0 +1,281 @@ +// Implementation of TOML's local date/time. +// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go +// to avoid pulling all the Google dependencies. +// +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package toml + +import ( + "fmt" + "time" +) + +// A LocalDate represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type LocalDate struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). 
+ Day int // Day of the month, starting at 1. +} + +// LocalDateOf returns the LocalDate in which a time occurs in that time's location. +func LocalDateOf(t time.Time) LocalDate { + var d LocalDate + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseLocalDate(s string) (LocalDate, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return LocalDate{}, err + } + return LocalDateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d LocalDate) IsValid() bool { + return LocalDateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.LocalDate, even when time.LocalDate returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d LocalDate) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d LocalDate) AddDays(n int) LocalDate { + return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d LocalDate) DaysSince(s LocalDate) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 LocalDate) Before(d2 LocalDate) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 LocalDate) After(d2 LocalDate) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseLocalDate. +func (d *LocalDate) UnmarshalText(data []byte) error { + var err error + *d, err = ParseLocalDate(string(data)) + return err +} + +// A LocalTime represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. 
+type LocalTime struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func LocalTimeOf(t time.Time) LocalTime { + var tm LocalTime + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseLocalTime parses a string and returns the time value it represents. +// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseLocalTime(s string) (LocalTime, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return LocalTime{}, err + } + return LocalTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t LocalTime) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return LocalTimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). +func (t LocalTime) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseLocalTime. +func (t *LocalTime) UnmarshalText(data []byte) error { + var err error + *t, err = ParseLocalTime(string(data)) + return err +} + +// A LocalDateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +type LocalDateTime struct { + Date LocalDate + Time LocalTime +} + +// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. + +// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. +func LocalDateTimeOf(t time.Time) LocalDateTime { + return LocalDateTime{ + Date: LocalDateOf(t), + Time: LocalTimeOf(t), + } +} + +// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. +// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseLocalTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. 
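
Because this file introduces an entirely new public surface (LocalDate, LocalTime, LocalDateTime and their parse helpers), a small usage sketch may help orient readers; it is illustrative only and not part of the patch. All identifiers come from this file, the literal dates are arbitrary, and the expected outputs follow from the String methods defined here.

package main

import (
	"fmt"
	"time"

	toml "github.com/pelletier/go-toml"
)

func main() {
	d, err := toml.ParseLocalDate("1979-05-27")
	if err != nil {
		panic(err)
	}
	fmt.Println(d)              // 1979-05-27
	fmt.Println(d.AddDays(5))   // 1979-06-01
	fmt.Println(d.In(time.UTC)) // 1979-05-27 00:00:00 +0000 UTC

	dt, err := toml.ParseLocalDateTime("1979-05-27T07:32:00.999999")
	if err != nil {
		panic(err)
	}
	// LocalTime.String pads a non-zero fractional part to nine digits.
	fmt.Println(dt) // 1979-05-27T07:32:00.999999000
}
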
+func ParseLocalDateTime(s string) (LocalDateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return LocalDateTime{}, err + } + } + return LocalDateTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalDate. +func (dt LocalDateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. +func (dt LocalDateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the LocalDateTime in the given location. +// +// If the time is missing or ambigous at the location, In returns the same +// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then +// both +// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.LocalDateTime{ +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, +// civil.LocalTime{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt LocalDateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt LocalDateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The datetime is expected to be a string in a format accepted by ParseLocalDateTime +func (dt *LocalDateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseLocalDateTime(string(data)) + return err +} diff --git a/registry-library/vendor/github.com/pelletier/go-toml/marshal.go b/registry-library/vendor/github.com/pelletier/go-toml/marshal.go index 671da556..032e0ffc 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/marshal.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/marshal.go @@ -2,24 +2,34 @@ package toml import ( "bytes" + "encoding" "errors" "fmt" "io" "reflect" + "sort" "strconv" "strings" "time" ) -const tagKeyMultiline = "multiline" +const ( + tagFieldName = "toml" + tagFieldComment = "comment" + tagCommented = "commented" + tagMultiline = "multiline" + tagDefault = "default" +) type tomlOpts struct { - name string - comment string - commented bool - multiline bool - include bool - omitempty bool + name string + nameFromTag bool + comment string + commented bool + multiline bool + include bool + omitempty bool + defaultValue string } type encOpts struct { @@ -31,10 +41,44 @@ var encOptsDefaults = encOpts{ quoteMapKeys: false, } +type annotation struct { + tag string + comment string + commented string + multiline string + defaultValue string +} + +var annotationDefault = annotation{ + tag: tagFieldName, + comment: tagFieldComment, + commented: tagCommented, + multiline: tagMultiline, + defaultValue: tagDefault, +} + +type marshalOrder int + +// Orders the Encoder can write the fields to the output stream. +const ( + // Sort fields alphabetically. 
+ OrderAlphabetical marshalOrder = iota + 1 + // Preserve the order the fields are encountered. For example, the order of fields in + // a struct. + OrderPreserve +) + var timeType = reflect.TypeOf(time.Time{}) var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var localDateType = reflect.TypeOf(LocalDate{}) +var localTimeType = reflect.TypeOf(LocalTime{}) +var localDateTimeType = reflect.TypeOf(LocalDateTime{}) +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) -// Check if the given marshall type maps to a Tree primitive +// Check if the given marshal type maps to a Tree primitive func isPrimitive(mtype reflect.Type) bool { switch mtype.Kind() { case reflect.Ptr: @@ -50,37 +94,69 @@ func isPrimitive(mtype reflect.Type) bool { case reflect.String: return true case reflect.Struct: - return mtype == timeType || isCustomMarshaler(mtype) + return isTimeType(mtype) default: return false } } -// Check if the given marshall type maps to a Tree slice -func isTreeSlice(mtype reflect.Type) bool { +func isTimeType(mtype reflect.Type) bool { + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType +} + +// Check if the given marshal type maps to a Tree slice or array +func isTreeSequence(mtype reflect.Type) bool { switch mtype.Kind() { - case reflect.Slice: - return !isOtherSlice(mtype) + case reflect.Ptr: + return isTreeSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTree(mtype.Elem()) default: return false } } -// Check if the given marshall type maps to a non-Tree slice -func isOtherSlice(mtype reflect.Type) bool { +// Check if the given marshal type maps to a slice or array of a custom marshaler type +func isCustomMarshalerSequence(mtype reflect.Type) bool { switch mtype.Kind() { case reflect.Ptr: - return isOtherSlice(mtype.Elem()) - case reflect.Slice: - return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) + return isCustomMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a text marshaler type +func isTextMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTextMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a non-Tree slice or array +func isOtherSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return !isTreeSequence(mtype) default: return false } } -// Check if the given marshall type maps to a Tree +// Check if the given marshal type maps to a Tree func isTree(mtype reflect.Type) bool { switch mtype.Kind() { + case reflect.Ptr: + return isTree(mtype.Elem()) case reflect.Map: return true case reflect.Struct: @@ -98,12 +174,42 @@ func callCustomMarshaler(mval reflect.Value) ([]byte, error) { return mval.Interface().(Marshaler).MarshalTOML() } +func isTextMarshaler(mtype reflect.Type) bool { + return 
mtype.Implements(textMarshalerType) && !isTimeType(mtype) +} + +func callTextMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(encoding.TextMarshaler).MarshalText() +} + +func isCustomUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(unmarshalerType) +} + +func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { + return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) +} + +func isTextUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(textUnmarshalerType) +} + +func callTextUnmarshaler(mval reflect.Value, text []byte) error { + return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) +} + // Marshaler is the interface implemented by types that // can marshal themselves into valid TOML. type Marshaler interface { MarshalTOML() ([]byte, error) } +// Unmarshaler is the interface implemented by types that +// can unmarshal a TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + /* Marshal returns the TOML encoding of v. Behavior is similar to the Go json encoder, except that there is no concept of a Marshaler interface or MarshalTOML @@ -135,7 +241,9 @@ Tree primitive types and corresponding marshal types: float64 float32, float64, pointers to same string string, pointers to same bool bool, pointers to same - time.Time time.Time{}, pointers to same + time.LocalTime time.LocalTime{}, pointers to same + +For additional flexibility, use the Encoder API. */ func Marshal(v interface{}) ([]byte, error) { return NewEncoder(nil).marshal(v) @@ -145,13 +253,24 @@ func Marshal(v interface{}) ([]byte, error) { type Encoder struct { w io.Writer encOpts + annotation + line int + col int + order marshalOrder + promoteAnon bool + indentation string } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{ - w: w, - encOpts: encOptsDefaults, + w: w, + encOpts: encOptsDefaults, + annotation: annotationDefault, + line: 0, + col: 1, + order: OrderAlphabetical, + indentation: " ", } } @@ -197,65 +316,175 @@ func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { return e } +// Order allows to change in which order fields will be written to the output stream. +func (e *Encoder) Order(ord marshalOrder) *Encoder { + e.order = ord + return e +} + +// Indentation allows to change indentation when marshalling. +func (e *Encoder) Indentation(indent string) *Encoder { + e.indentation = indent + return e +} + +// SetTagName allows changing default tag "toml" +func (e *Encoder) SetTagName(v string) *Encoder { + e.tag = v + return e +} + +// SetTagComment allows changing default tag "comment" +func (e *Encoder) SetTagComment(v string) *Encoder { + e.comment = v + return e +} + +// SetTagCommented allows changing default tag "commented" +func (e *Encoder) SetTagCommented(v string) *Encoder { + e.commented = v + return e +} + +// SetTagMultiline allows changing default tag "multiline" +func (e *Encoder) SetTagMultiline(v string) *Encoder { + e.multiline = v + return e +} + +// PromoteAnonymous allows to change how anonymous struct fields are marshaled. +// Usually, they are marshaled as if the inner exported fields were fields in +// the outer struct. However, if an anonymous struct field is given a name in +// its TOML tag, it is treated like a regular struct field with that name. +// rather than being anonymous. 
+// +// In case anonymous promotion is enabled, all anonymous structs are promoted +// and treated like regular struct fields. +func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { + e.promoteAnon = promote + return e +} + func (e *Encoder) marshal(v interface{}) ([]byte, error) { + // Check if indentation is valid + for _, char := range e.indentation { + if !isSpace(char) { + return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") + } + } + mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Struct { - return []byte{}, errors.New("Only a struct can be marshaled to TOML") + if mtype == nil { + return []byte{}, errors.New("nil cannot be marshaled to TOML") + } + + switch mtype.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Ptr: + if mtype.Elem().Kind() != reflect.Struct { + return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") + } + if reflect.ValueOf(v).IsNil() { + return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") + } + default: + return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") } + sval := reflect.ValueOf(v) if isCustomMarshaler(mtype) { return callCustomMarshaler(sval) } + if isTextMarshaler(mtype) { + return callTextMarshaler(sval) + } t, err := e.valueToTree(mtype, sval) if err != nil { return []byte{}, err } var buf bytes.Buffer - _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) + _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, false) return buf.Bytes(), err } +// Create next tree with a position based on Encoder.line +func (e *Encoder) nextTree() *Tree { + return newTreeWithPosition(Position{Line: e.line, Col: 1}) +} + // Convert given marshal struct or map value to toml tree func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { if mtype.Kind() == reflect.Ptr { return e.valueToTree(mtype.Elem(), mval.Elem()) } - tval := newTree() + tval := e.nextTree() switch mtype.Kind() { case reflect.Struct: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef) - if opts.include && (!opts.omitempty || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err + switch mval.Interface().(type) { + case Tree: + reflect.ValueOf(tval).Elem().Set(mval) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef, mvalf := mtype.Field(i), mval.Field(i) + opts := tomlOptions(mtypef, e.annotation) + if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { + val, err := e.valueToToml(mtypef.Type, mvalf) + if err != nil { + return nil, err + } + if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { + e.appendTree(tval, tree) + } else { + val = e.wrapTomlValue(val, tval) + tval.SetPathWithOptions([]string{opts.name}, SetOptions{ + Comment: opts.comment, + Commented: opts.commented, + Multiline: opts.multiline, + }, val) + } } - - tval.SetWithOptions(opts.name, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - }, val) } } case reflect.Map: - for _, key := range mval.MapKeys() { + keys := mval.MapKeys() + if e.order == OrderPreserve && len(keys) > 0 { + // Sorting []reflect.Value is not straight forward. + // + // OrderPreserve will support deterministic results when string is used + // as the key to maps. 
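
Several new Encoder options land in this hunk, so a brief usage sketch may help; it is illustrative only and not part of the patch. The Meta/Config types and the map literal are invented; NewEncoder, Encode, PromoteAnonymous, Order and OrderPreserve are the identifiers this diff adds or references.

package main

import (
	"bytes"
	"fmt"

	toml "github.com/pelletier/go-toml"
)

type Meta struct {
	Version string `toml:"version"`
}

type Config struct {
	Meta        // anonymous; inlined into the top level by default
	Name string `toml:"name"`
}

func main() {
	c := Config{Meta: Meta{Version: "1.0"}, Name: "demo"}

	var flat, promoted bytes.Buffer
	if err := toml.NewEncoder(&flat).Encode(c); err != nil {
		panic(err)
	}
	// With promotion enabled, the embedded struct gets its own [Meta] table.
	if err := toml.NewEncoder(&promoted).PromoteAnonymous(true).Encode(c); err != nil {
		panic(err)
	}
	fmt.Println(flat.String())
	fmt.Println(promoted.String())

	// Per the comment above, OrderPreserve sorts string map keys, so the
	// encoder output for a map is deterministic across runs.
	m := map[string]int{"zebra": 1, "apple": 2, "mango": 3}
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Order(toml.OrderPreserve).Encode(m); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // apple = 2, mango = 3, zebra = 1
}
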
+ typ := keys[0].Type() + kind := keys[0].Kind() + if kind == reflect.String { + ikeys := make([]string, len(keys)) + for i := range keys { + ikeys[i] = keys[i].Interface().(string) + } + sort.Strings(ikeys) + for i := range ikeys { + keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) + } + } + } + for _, key := range keys { mvalf := mval.MapIndex(key) + if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { + continue + } val, err := e.valueToToml(mtype.Elem(), mvalf) if err != nil { return nil, err } + val = e.wrapTomlValue(val, tval) if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) + keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) if err != nil { return nil, err } tval.SetPath([]string{keyStr}, val) } else { - tval.Set(key.String(), val) + tval.SetPath([]string{key.String()}, val) } } } @@ -291,22 +520,39 @@ func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (int // Convert given marshal value to toml value func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { if mtype.Kind() == reflect.Ptr { - return e.valueToToml(mtype.Elem(), mval.Elem()) + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + default: + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + } + if mtype.Kind() == reflect.Interface { + return e.valueToToml(mval.Elem().Type(), mval.Elem()) } switch { case isCustomMarshaler(mtype): return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err case isTree(mtype): return e.valueToTree(mtype, mval) - case isTreeSlice(mtype): - return e.valueToTreeSlice(mtype, mval) - case isOtherSlice(mtype): + case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): return e.valueToOtherSlice(mtype, mval) + case isTreeSequence(mtype): + return e.valueToTreeSlice(mtype, mval) default: switch mtype.Kind() { case reflect.Bool: return mval.Bool(), nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { + return fmt.Sprint(mval), nil + } return mval.Int(), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return mval.Uint(), nil @@ -315,18 +561,50 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface case reflect.String: return mval.String(), nil case reflect.Struct: - return mval.Interface().(time.Time), nil + return mval.Interface(), nil default: return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) } } } +func (e *Encoder) appendTree(t, o *Tree) error { + for key, value := range o.values { + if _, ok := t.values[key]; ok { + continue + } + if tomlValue, ok := value.(*tomlValue); ok { + tomlValue.position.Col = t.position.Col + } + t.values[key] = value + } + return nil +} + +// Create a toml value with the current line number as the position line +func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { + _, isTree := val.(*Tree) + _, isTreeS := val.([]*Tree) + if isTree || isTreeS { + return val + } + + ret := &tomlValue{ + value: val, + position: Position{ + e.line, + parent.position.Col, + }, + } + e.line++ + return ret +} + // Unmarshal 
attempts to unmarshal the Tree into a Go struct pointed by v. // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for // sub-structs, and only definite types can be unmarshaled. func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t} + d := Decoder{tval: t, tagName: tagFieldName} return d.unmarshal(v) } @@ -334,8 +612,11 @@ func (t *Tree) Unmarshal(v interface{}) error { // See Marshal() documentation for types mapping table. func (t *Tree) Marshal() ([]byte, error) { var buf bytes.Buffer - err := NewEncoder(&buf).Encode(t) - return buf.Bytes(), err + _, err := t.WriteTo(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil } // Unmarshal parses the TOML-encoded data and stores the result in the value @@ -347,6 +628,14 @@ func (t *Tree) Marshal() ([]byte, error) { // The following struct annotations are supported: // // toml:"Field" Overrides the field's name to map to. +// default:"foo" Provides a default value. +// +// For default values, only fields of the following types are supported: +// * string +// * bool +// * int +// * int64 +// * float64 // // See Marshal() documentation for types mapping table. func Unmarshal(data []byte, v interface{}) error { @@ -362,6 +651,9 @@ type Decoder struct { r io.Reader tval *Tree encOpts + tagName string + strict bool + visitor visitorState } // NewDecoder returns a new decoder that reads from r. @@ -369,6 +661,7 @@ func NewDecoder(r io.Reader) *Decoder { return &Decoder{ r: r, encOpts: encOptsDefaults, + tagName: tagFieldName, } } @@ -385,60 +678,200 @@ func (d *Decoder) Decode(v interface{}) error { return d.unmarshal(v) } +// SetTagName allows changing default tag "toml" +func (d *Decoder) SetTagName(v string) *Decoder { + d.tagName = v + return d +} + +// Strict allows changing to strict decoding. Any fields that are found in the +// input data and do not have a corresponding struct member cause an error. +func (d *Decoder) Strict(strict bool) *Decoder { + d.strict = strict + return d +} + func (d *Decoder) unmarshal(v interface{}) error { mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { - return errors.New("Only a pointer to struct can be unmarshaled from TOML") + if mtype == nil { + return errors.New("nil cannot be unmarshaled from TOML") + } + if mtype.Kind() != reflect.Ptr { + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + elem := mtype.Elem() + + switch elem.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Interface: + elem = mapStringInterfaceType + default: + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + if reflect.ValueOf(v).IsNil() { + return errors.New("nil pointer cannot be unmarshaled from TOML") + } + + vv := reflect.ValueOf(v).Elem() + + if d.strict { + d.visitor = newVisitorState(d.tval) } - sval, err := d.valueFromTree(mtype.Elem(), d.tval) + sval, err := d.valueFromTree(elem, d.tval, &vv) if err != nil { return err } + if err := d.visitor.validate(); err != nil { + return err + } reflect.ValueOf(v).Elem().Set(sval) return nil } -// Convert toml tree to marshal struct or map, using marshal type -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { +// Convert toml tree to marshal struct or map, using marshal type. When mval1 +// is non-nil, merge fields into the given value instead of allocating a new one. 
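
The `default` struct tag and the Decoder's Strict mode documented above are easiest to see together; the sketch below is illustrative only and not part of the patch. The Server type is invented, while NewDecoder, Strict, Decode and the default-tag semantics (int64 is among the supported default types) are taken from this hunk.

package main

import (
	"fmt"
	"strings"

	toml "github.com/pelletier/go-toml"
)

type Server struct {
	Host string `toml:"host"`
	Port int64  `toml:"port" default:"8080"`
}

func main() {
	var s Server
	dec := toml.NewDecoder(strings.NewReader(`host = "example.com"`)).Strict(true)
	if err := dec.Decode(&s); err != nil {
		panic(err) // in Strict mode, an unknown key in the input would land here
	}
	fmt.Println(s.Host, s.Port) // example.com 8080 (Port filled from the default tag)
}
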
+func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) + return d.unwrapPointer(mtype, tval, mval1) + } + + // Check if pointer to value implements the Unmarshaler interface. + if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { + d.visitor.visitAll() + + if tval == nil { + return mvalPtr.Elem(), nil + } + + if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil } + var mval reflect.Value switch mtype.Kind() { case reflect.Struct: - mval = reflect.New(mtype).Elem() - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - opts := tomlOptions(mtypef) - if opts.include { + if mval1 != nil { + mval = *mval1 + } else { + mval = reflect.New(mtype).Elem() + } + + switch mval.Interface().(type) { + case Tree: + mval.Set(reflect.ValueOf(tval).Elem()) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + an := annotation{tag: d.tagName} + opts := tomlOptions(mtypef, an) + if !opts.include { + continue + } baseKey := opts.name - keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)} - for _, key := range keysToTry { - exists := tval.Has(key) - if !exists { - continue + keysToTry := []string{ + baseKey, + strings.ToLower(baseKey), + strings.ToTitle(baseKey), + strings.ToLower(string(baseKey[0])) + baseKey[1:], + } + + found := false + if tval != nil { + for _, key := range keysToTry { + exists := tval.HasPath([]string{key}) + if !exists { + continue + } + + d.visitor.push(key) + val := tval.GetPath([]string{key}) + fval := mval.Field(i) + mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.Field(i).Set(mvalf) + found = true + d.visitor.pop() + break + } + } + + if !found && opts.defaultValue != "" { + mvalf := mval.Field(i) + var val interface{} + var err error + switch mvalf.Kind() { + case reflect.String: + val = opts.defaultValue + case reflect.Bool: + val, err = strconv.ParseBool(opts.defaultValue) + case reflect.Uint: + val, err = strconv.ParseUint(opts.defaultValue, 10, 0) + case reflect.Uint8: + val, err = strconv.ParseUint(opts.defaultValue, 10, 8) + case reflect.Uint16: + val, err = strconv.ParseUint(opts.defaultValue, 10, 16) + case reflect.Uint32: + val, err = strconv.ParseUint(opts.defaultValue, 10, 32) + case reflect.Uint64: + val, err = strconv.ParseUint(opts.defaultValue, 10, 64) + case reflect.Int: + val, err = strconv.ParseInt(opts.defaultValue, 10, 0) + case reflect.Int8: + val, err = strconv.ParseInt(opts.defaultValue, 10, 8) + case reflect.Int16: + val, err = strconv.ParseInt(opts.defaultValue, 10, 16) + case reflect.Int32: + val, err = strconv.ParseInt(opts.defaultValue, 10, 32) + case reflect.Int64: + val, err = strconv.ParseInt(opts.defaultValue, 10, 64) + case reflect.Float32: + val, err = strconv.ParseFloat(opts.defaultValue, 32) + case reflect.Float64: + val, err = strconv.ParseFloat(opts.defaultValue, 64) + default: + return mvalf, fmt.Errorf("unsupported field type for default option") + } + + if err != nil { + return mvalf, err + } + mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) + } + + // save the old behavior above and try to check structs + if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { + tmpTval := tval 
+ if !mtypef.Anonymous { + tmpTval = nil } - val := tval.Get(key) - mvalf, err := d.valueFromToml(mtypef.Type, val) + fval := mval.Field(i) + v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) if err != nil { - return mval, formatError(err, tval.GetPosition(key)) + return v, err } - mval.Field(i).Set(mvalf) - break + mval.Field(i).Set(v) } } } case reflect.Map: mval = reflect.MakeMap(mtype) for _, key := range tval.Keys() { + d.visitor.push(key) // TODO: path splits key val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val) + mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) if err != nil { - return mval, formatError(err, tval.GetPosition(key)) + return mval, formatError(err, tval.GetPositionPath([]string{key})) } - mval.SetMapIndex(reflect.ValueOf(key), mvalf) + mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + d.visitor.pop() } } return mval, nil @@ -446,22 +879,32 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, // Convert toml value to marshal struct/map slice, using marshal type func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + for i := 0; i < len(tval); i++ { - val, err := d.valueFromTree(mtype.Elem(), tval[i]) + d.visitor.push(strconv.Itoa(i)) + val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) if err != nil { return mval, err } mval.Index(i).Set(val) + d.visitor.pop() } return mval, nil } // Convert toml value to marshal primitive slice, using marshal type func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i]) + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) if err != nil { return mval, err } @@ -470,33 +913,133 @@ func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (r return mval, nil } -// Convert toml value to marshal value, using marshal type -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val := reflect.ValueOf(tval) + length := val.Len() + + mval, err := makeSliceOrArray(mtype, length) + if err != nil { + return mval, err + } + + for i := 0; i < length; i++ { + val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Create a new slice or a new array with specified length +func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { + var mval reflect.Value + switch mtype.Kind() { + case reflect.Slice: + mval = reflect.MakeSlice(mtype, tLength, tLength) + case reflect.Array: + mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() + if tLength > mtype.Len() { + return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) + } + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type. 
When mval1 is non-nil +// and the given type is a struct value, merge fields into it. +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) + return d.unwrapPointer(mtype, tval, mval1) } - switch tval.(type) { + switch t := tval.(type) { case *Tree: + var mval11 *reflect.Value + if mtype.Kind() == reflect.Struct { + mval11 = mval1 + } + if isTree(mtype) { - return d.valueFromTree(mtype, tval.(*Tree)) + return d.valueFromTree(mtype, t, mval11) } + + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) + } else { + return d.valueFromToml(mval1.Elem().Type(), t, nil) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) case []*Tree: - if isTreeSlice(mtype) { - return d.valueFromTreeSlice(mtype, tval.([]*Tree)) + if isTreeSequence(mtype) { + return d.valueFromTreeSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) case []interface{}: - if isOtherSlice(mtype) { - return d.valueFromOtherSlice(mtype, tval.([]interface{})) + d.visitor.visit() + if isOtherSequence(mtype) { + return d.valueFromOtherSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) default: + d.visitor.visit() + // Check if pointer to value implements the encoding.TextUnmarshaler. 
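For context, the encoding.TextUnmarshaler branch added in this hunk means a primitive TOML value can now be decoded into any field type that has an UnmarshalText method. A minimal sketch of the resulting behavior, assuming the vendored github.com/pelletier/go-toml import path and go-toml's existing Unmarshal helper; the Severity type, its field, and the severity key are illustrative only:

    package main

    import (
        "fmt"
        "strings"

        toml "github.com/pelletier/go-toml"
    )

    // Severity is a hypothetical type implementing encoding.TextUnmarshaler.
    type Severity struct{ level string }

    func (s *Severity) UnmarshalText(text []byte) error {
        s.level = strings.ToUpper(string(text))
        return nil
    }

    type config struct {
        Severity Severity `toml:"severity"`
    }

    func main() {
        var c config
        // The decoder feeds the raw string through UnmarshalText instead of
        // requiring a plain string field.
        if err := toml.Unmarshal([]byte(`severity = "warn"`), &c); err != nil {
            panic(err)
        }
        fmt.Println(c.Severity.level) // WARN
    }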
+ if mvalPtr := reflect.New(mtype); isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { + if err := d.unmarshalText(tval, mvalPtr); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) + } + return mvalPtr.Elem(), nil + } + switch mtype.Kind() { case reflect.Bool, reflect.Struct: val := reflect.ValueOf(tval) - // if this passes for when mtype is reflect.Struct, tval is a time.Time + + switch val.Type() { + case localDateType: + localDate := val.Interface().(LocalDate) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil + } + case localDateTimeType: + localDateTime := val.Interface().(LocalDateTime) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date( + localDateTime.Date.Year, + localDateTime.Date.Month, + localDateTime.Date.Day, + localDateTime.Time.Hour, + localDateTime.Time.Minute, + localDateTime.Time.Second, + localDateTime.Time.Nanosecond, + time.Local)), nil + } + } + + // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } @@ -512,45 +1055,72 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V return val.Convert(mtype), nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { + d, err := time.ParseDuration(val.String()) + if err != nil { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. 
%s", tval, tval, mtype.String(), err) + } + return reflect.ValueOf(d), nil + } + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } return val.Convert(mtype), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if val.Int() < 0 { + + if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { + if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } return val.Convert(mtype), nil case reflect.Float32, reflect.Float64: val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } return val.Convert(mtype), nil + case reflect.Interface: + if mval1 == nil || mval1.IsNil() { + return reflect.ValueOf(tval), nil + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + case reflect.Slice, reflect.Array: + if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { + return d.valueFromOtherSliceI(mtype, t) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) default: return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) } } } -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val, err := d.valueFromToml(mtype.Elem(), tval) +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + var melem *reflect.Value + + if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { + elem := mval1.Elem() + melem = &elem + } + + val, err := d.valueFromToml(mtype.Elem(), tval, melem) if err != nil { return reflect.ValueOf(nil), err } @@ -559,21 +1129,38 @@ func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.V return mval, nil } -func tomlOptions(vf reflect.StructField) tomlOpts { - tag := vf.Tag.Get("toml") +func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { + var buf bytes.Buffer + fmt.Fprint(&buf, tval) + return callTextUnmarshaler(mval, buf.Bytes()) +} + +func tomlOptions(vf reflect.StructField, an 
annotation) tomlOpts { + tag := vf.Tag.Get(an.tag) parse := strings.Split(tag, ",") var comment string - if c := vf.Tag.Get("comment"); c != "" { + if c := vf.Tag.Get(an.comment); c != "" { comment = c } - commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) - multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline)) - result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false} + commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) + multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) + defaultValue := vf.Tag.Get(tagDefault) + result := tomlOpts{ + name: vf.Name, + nameFromTag: false, + comment: comment, + commented: commented, + multiline: multiline, + include: true, + omitempty: false, + defaultValue: defaultValue, + } if parse[0] != "" { if parse[0] == "-" && len(parse) == 1 { result.include = false } else { result.name = strings.Trim(parse[0], " ") + result.nameFromTag = true } } if vf.PkgPath != "" { @@ -590,11 +1177,7 @@ func tomlOptions(vf reflect.StructField) tomlOpts { func isZero(val reflect.Value) bool { switch val.Type().Kind() { - case reflect.Map: - fallthrough - case reflect.Array: - fallthrough - case reflect.Slice: + case reflect.Slice, reflect.Array, reflect.Map: return val.Len() == 0 default: return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) @@ -607,3 +1190,80 @@ func formatError(err error, pos Position) error { } return fmt.Errorf("%s: %s", pos, err) } + +// visitorState keeps track of which keys were unmarshaled. +type visitorState struct { + tree *Tree + path []string + keys map[string]struct{} + active bool +} + +func newVisitorState(tree *Tree) visitorState { + path, result := []string{}, map[string]struct{}{} + insertKeys(path, result, tree) + return visitorState{ + tree: tree, + path: path[:0], + keys: result, + active: true, + } +} + +func (s *visitorState) push(key string) { + if s.active { + s.path = append(s.path, key) + } +} + +func (s *visitorState) pop() { + if s.active { + s.path = s.path[:len(s.path)-1] + } +} + +func (s *visitorState) visit() { + if s.active { + delete(s.keys, strings.Join(s.path, ".")) + } +} + +func (s *visitorState) visitAll() { + if s.active { + for k := range s.keys { + if strings.HasPrefix(k, strings.Join(s.path, ".")) { + delete(s.keys, k) + } + } + } +} + +func (s *visitorState) validate() error { + if !s.active { + return nil + } + undecoded := make([]string, 0, len(s.keys)) + for key := range s.keys { + undecoded = append(undecoded, key) + } + sort.Strings(undecoded) + if len(undecoded) > 0 { + return fmt.Errorf("undecoded keys: %q", undecoded) + } + return nil +} + +func insertKeys(path []string, m map[string]struct{}, tree *Tree) { + for k, v := range tree.values { + switch node := v.(type) { + case []*Tree: + for i, item := range node { + insertKeys(append(path, k, strconv.Itoa(i)), m, item) + } + case *Tree: + insertKeys(append(path, k), m, node) + case *tomlValue: + m[strings.Join(append(path, k), ".")] = struct{}{} + } + } +} diff --git a/registry-library/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/registry-library/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml new file mode 100644 index 00000000..792b72ed --- /dev/null +++ b/registry-library/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic_lists] + floats = [12.3,45.6,78.9] + bools = [true,false,true] + dates = 
[1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + ints = [8001,8001,8002] + uints = [5002,5003] + strings = ["One","Two","Three"] + +[[subdocptrs]] + name = "Second" + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.second] + name = "Second" + + [subdoc.first] + name = "First" + +[basic] + uint = 5001 + bool = true + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + date = 1979-05-27T07:32:00Z + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" diff --git a/registry-library/vendor/github.com/pelletier/go-toml/marshal_test.toml b/registry-library/vendor/github.com/pelletier/go-toml/marshal_test.toml index 1c5f98e7..ba5e110b 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ b/registry-library/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -4,6 +4,7 @@ title = "TOML Marshal Testing" bool = true date = 1979-05-27T07:32:00Z float = 123.4 + float64 = 123.456782132399 int = 5000 string = "Bite me" uint = 5001 diff --git a/registry-library/vendor/github.com/pelletier/go-toml/parser.go b/registry-library/vendor/github.com/pelletier/go-toml/parser.go index 2d27599a..7bf40bbd 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/parser.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/parser.go @@ -77,8 +77,10 @@ func (p *tomlParser) parseStart() tomlParserStateFn { return p.parseAssign case tokenEOF: return nil + case tokenError: + p.raiseError(tok, "parsing error: %s", tok.String()) default: - p.raiseError(tok, "unexpected token") + p.raiseError(tok, "unexpected token %s", tok.typ) } return nil } @@ -156,6 +158,11 @@ func (p *tomlParser) parseGroup() tomlParserStateFn { if err := p.tree.createSubTree(keys, startToken.Position); err != nil { p.raiseError(key, "%s", err) } + destTree := p.tree.GetPath(keys) + if target, ok := destTree.(*Tree); ok && target != nil && target.inline { + p.raiseError(key, "could not re-define existing inline table or its sub-table: %s", + strings.Join(keys, ".")) + } p.assume(tokenRightBracket) p.currentTable = keys return p.parseStart @@ -165,6 +172,11 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { key := p.getToken() p.assume(tokenEqual) + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err.Error()) + } + value := p.parseRvalue() var tableKey []string if len(p.currentTable) > 0 { @@ -173,6 +185,9 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { tableKey = []string{} } + prefixKey := parsedKey[0 : len(parsedKey)-1] + tableKey = append(tableKey, prefixKey...)
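For reference, the parsedKey/prefixKey handling introduced in parseAssign above is what lets dotted keys on the left-hand side of an assignment create their intermediate tables. A minimal sketch of the behavior this enables, assuming the vendored github.com/pelletier/go-toml import path and its existing Load and Get helpers (the physical.color key is illustrative only):

    package main

    import (
        "fmt"

        toml "github.com/pelletier/go-toml"
    )

    func main() {
        // The dotted key creates the intermediate "physical" table on the fly.
        tree, err := toml.Load(`physical.color = "orange"`)
        if err != nil {
            panic(err)
        }
        fmt.Println(tree.Get("physical.color")) // orange
    }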
+ // find the table to assign, looking out for arrays of tables var targetNode *Tree switch node := p.tree.GetPath(tableKey).(type) { @@ -180,17 +195,24 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { targetNode = node[len(node)-1] case *Tree: targetNode = node + case nil: + // create intermediate + if err := p.tree.createSubTree(tableKey, key.Position); err != nil { + p.raiseError(key, "could not create intermediate group: %s", err) + } + targetNode = p.tree.GetPath(tableKey).(*Tree) default: p.raiseError(key, "Unknown table type for path: %s", strings.Join(tableKey, ".")) } - // assign value to the found table - keyVals := []string{key.val} - if len(keyVals) != 1 { - p.raiseError(key, "Invalid key") + if targetNode.inline { + p.raiseError(key, "could not add key or sub-table to existing inline table or its sub-table: %s", + strings.Join(tableKey, ".")) } - keyVal := keyVals[0] + + // assign value to the found table + keyVal := parsedKey[len(parsedKey)-1] localKey := []string{keyVal} finalKey := append(tableKey, keyVal) if targetNode.GetPath(localKey) != nil { @@ -301,7 +323,41 @@ func (p *tomlParser) parseRvalue() interface{} { } return val case tokenDate: - val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) + layout := time.RFC3339Nano + if !strings.Contains(tok.val, "T") { + layout = strings.Replace(layout, "T", " ", 1) + } + val, err := time.ParseInLocation(layout, tok.val, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalDate: + v := strings.Replace(tok.val, " ", "T", -1) + isDateTime := false + isTime := false + for _, c := range v { + if c == 'T' || c == 't' { + isDateTime = true + break + } + if c == ':' { + isTime = true + break + } + } + + var val interface{} + var err error + + if isDateTime { + val, err = ParseLocalDateTime(v) + } else if isTime { + val, err = ParseLocalTime(v) + } else { + val, err = ParseLocalDate(v) + } + if err != nil { p.raiseError(tok, "%s", err) } return val @@ -338,18 +394,21 @@ Loop: case tokenRightCurlyBrace: p.getToken() break Loop - case tokenKey: + case tokenKey, tokenInteger, tokenString: if !tokenIsComma(previous) && previous != nil { p.raiseError(follow, "comma expected between fields in inline table") } key := p.getToken() p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err) + } + value := p.parseRvalue() - tree.Set(key.val, value) + tree.SetPath(parsedKey, value) case tokenComma: - if previous == nil { - p.raiseError(follow, "inline table cannot start with a comma") - } if tokenIsComma(previous) { p.raiseError(follow, "need field between two commas in inline table") } @@ -362,12 +421,13 @@ Loop: if tokenIsComma(previous) { p.raiseError(previous, "trailing comma at the end of inline table") } + tree.inline = true return tree } func (p *tomlParser) parseArray() interface{} { var array []interface{} - arrayType := reflect.TypeOf(nil) + arrayType := reflect.TypeOf(newTree()) for { follow := p.peek() if follow == nil || follow.typ == tokenEOF { @@ -378,11 +438,8 @@ break } val := p.parseRvalue() - if arrayType == nil { - arrayType = reflect.TypeOf(val) - } if reflect.TypeOf(val) != arrayType { - p.raiseError(follow, "mixed types in array") + arrayType = nil } array = append(array, val) follow = p.peek() @@ -396,6 +453,12 @@ p.getToken() } } + + // if the array is a mixed-type array or its length is 0, + // don't convert it to
a table array + if len(array) <= 0 { + arrayType = nil + } // An array of Trees is actually an array of inline // tables, which is a shorthand for a table array. If the // array was not converted from []interface{} to []*Tree, diff --git a/registry-library/vendor/github.com/pelletier/go-toml/test.sh b/registry-library/vendor/github.com/pelletier/go-toml/test.sh deleted file mode 100644 index ba6adf3f..00000000 --- a/registry-library/vendor/github.com/pelletier/go-toml/test.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -# fail out of the script if anything here fails -set -e -set -o pipefail - -# set the path to the present working directory -export GOPATH=`pwd` - -function git_clone() { - path=$1 - branch=$2 - version=$3 - if [ ! -d "src/$path" ]; then - mkdir -p src/$path - git clone https://$path.git src/$path - fi - pushd src/$path - git checkout "$branch" - git reset --hard "$version" - popd -} - -# Remove potential previous runs -rm -rf src test_program_bin toml-test - -go get github.com/pelletier/go-buffruneio -go get github.com/davecgh/go-spew/spew -go get gopkg.in/yaml.v2 -go get github.com/BurntSushi/toml - -# get code for BurntSushi TOML validation -# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize) -git_clone github.com/BurntSushi/toml master HEAD -git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD - -# build the BurntSushi test application -go build -o toml-test github.com/BurntSushi/toml-test - -# vendorize the current lib for testing -# NOTE: this basically mocks an install without having to go back out to github for code -mkdir -p src/github.com/pelletier/go-toml/cmd -mkdir -p src/github.com/pelletier/go-toml/query -cp *.go *.toml src/github.com/pelletier/go-toml -cp -R cmd/* src/github.com/pelletier/go-toml/cmd -cp -R query/* src/github.com/pelletier/go-toml/query -go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go - -# Run basic unit tests -go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out -go test github.com/pelletier/go-toml/cmd/tomljson -go test github.com/pelletier/go-toml/query - -# run the entire BurntSushi test suite -if [[ $# -eq 0 ]] ; then - echo "Running all BurntSushi tests" - ./toml-test ./test_program_bin | tee test_out -else - # run a specific test - test=$1 - test_path='src/github.com/BurntSushi/toml-test/tests' - valid_test="$test_path/valid/$test" - invalid_test="$test_path/invalid/$test" - - if [ -e "$valid_test.toml" ]; then - echo "Valid Test TOML for $test:" - echo "====" - cat "$valid_test.toml" - - echo "Valid Test JSON for $test:" - echo "====" - cat "$valid_test.json" - - echo "Go-TOML Output for $test:" - echo "====" - cat "$valid_test.toml" | ./test_program_bin - fi - - if [ -e "$invalid_test.toml" ]; then - echo "Invalid Test TOML for $test:" - echo "====" - cat "$invalid_test.toml" - - echo "Go-TOML Output for $test:" - echo "====" - echo "go-toml Output:" - cat "$invalid_test.toml" | ./test_program_bin - fi -fi diff --git a/registry-library/vendor/github.com/pelletier/go-toml/token.go b/registry-library/vendor/github.com/pelletier/go-toml/token.go index 1a908134..6af4ec46 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/token.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/token.go @@ -1,10 +1,6 @@ package toml -import ( - "fmt" - "strconv" - "unicode" -) +import "fmt" // Define tokens type tokenType int @@ -35,6 +31,7 @@ const ( tokenDoubleLeftBracket tokenDoubleRightBracket tokenDate + 
tokenLocalDate tokenKeyGroup tokenKeyGroupArray tokenComma @@ -68,7 +65,8 @@ var tokenTypeNames = []string{ ")", "]]", "[[", - "Date", + "LocalDate", + "LocalDate", "KeyGroup", "KeyGroupArray", ",", @@ -95,14 +93,6 @@ func (tt tokenType) String() string { return "Unknown" } -func (t token) Int() int { - if result, err := strconv.Atoi(t.val); err != nil { - panic(err) - } else { - return result - } -} - func (t token) String() string { switch t.typ { case tokenEOF: @@ -119,7 +109,7 @@ func isSpace(r rune) bool { } func isAlphanumeric(r rune) bool { - return unicode.IsLetter(r) || r == '_' + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' } func isKeyChar(r rune) bool { @@ -134,7 +124,7 @@ func isKeyStartChar(r rune) bool { } func isDigit(r rune) bool { - return unicode.IsNumber(r) + return '0' <= r && r <= '9' } func isHexDigit(r rune) bool { diff --git a/registry-library/vendor/github.com/pelletier/go-toml/toml.go b/registry-library/vendor/github.com/pelletier/go-toml/toml.go index 98c185ad..cbb89a9a 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/toml.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/toml.go @@ -23,13 +23,18 @@ type Tree struct { values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree comment string commented bool + inline bool position Position } func newTree() *Tree { + return newTreeWithPosition(Position{}) +} + +func newTreeWithPosition(pos Position) *Tree { return &Tree{ values: make(map[string]interface{}), - position: Position{}, + position: pos, } } @@ -117,6 +122,89 @@ func (t *Tree) GetPath(keys []string) interface{} { } } +// GetArray returns the value at key in the Tree. +// It returns []string, []int64, etc type if key has homogeneous lists +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArray(key string) interface{} { + if key == "" { + return t + } + return t.GetArrayPath(strings.Split(key, ".")) +} + +// GetArrayPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. 
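A minimal usage sketch for the GetArray accessor documented above, which hands back a typed slice for homogeneous arrays where Get would return the underlying []interface{} (import path assumed to be the vendored github.com/pelletier/go-toml; the ports key is illustrative only):

    package main

    import (
        "fmt"

        toml "github.com/pelletier/go-toml"
    )

    func main() {
        tree, err := toml.Load(`ports = [8001, 8001, 8002]`)
        if err != nil {
            panic(err)
        }
        // A homogeneous integer array comes back as []int64 rather than
        // as []interface{}.
        ports := tree.GetArray("ports").([]int64)
        fmt.Println(ports) // [8001 8001 8002]
    }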
+func (t *Tree) GetArrayPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + switch n := node.value.(type) { + case []interface{}: + return getArray(n) + default: + return node.value + } + default: + return node + } +} + +// if homogeneous array, then return slice type object over []interface{} +func getArray(n []interface{}) interface{} { + var s []string + var i64 []int64 + var f64 []float64 + var bl []bool + for _, value := range n { + switch v := value.(type) { + case string: + s = append(s, v) + case int64: + i64 = append(i64, v) + case float64: + f64 = append(f64, v) + case bool: + bl = append(bl, v) + default: + return n + } + } + if len(s) == len(n) { + return s + } else if len(i64) == len(n) { + return i64 + } else if len(f64) == len(n) { + return f64 + } else if len(bl) == len(n) { + return bl + } + return n +} + // GetPosition returns the position of the given key. func (t *Tree) GetPosition(key string) Position { if key == "" { @@ -125,6 +213,50 @@ func (t *Tree) GetPosition(key string) Position { return t.GetPositionPath(strings.Split(key, ".")) } +// SetPositionPath sets the position of element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree position is set. +func (t *Tree) SetPositionPath(keys []string, pos Position) { + if len(keys) == 0 { + t.position = pos + return + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + subtree = node[len(node)-1] + default: + return + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + node.position = pos + return + case *Tree: + node.position = pos + return + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + node[len(node)-1].position = pos + return + } +} + // GetPositionPath returns the element in the tree indicated by 'keys'. // If keys is of length zero, the current tree is returned. func (t *Tree) GetPositionPath(keys []string) Position { @@ -194,10 +326,10 @@ func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { // formatting instructions to the key, that will be reused by Marshal(). 
func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { + for i, intermediateKey := range keys[:len(keys)-1] { nextTree, exists := subtree.values[intermediateKey] if !exists { - nextTree = newTree() + nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) subtree.values[intermediateKey] = nextTree // add new element here } switch node := nextTree.(type) { @@ -207,7 +339,8 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac // go to most recent element if len(node) == 0 { // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTree()) + node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + subtree.values[intermediateKey] = node } subtree = node[len(node)-1] } @@ -215,19 +348,27 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac var toInsert interface{} - switch value.(type) { + switch v := value.(type) { case *Tree: - tt := value.(*Tree) - tt.comment = opts.Comment + v.comment = opts.Comment + v.commented = opts.Commented toInsert = value case []*Tree: + for i := range v { + v[i].commented = opts.Commented + } toInsert = value case *tomlValue: - tt := value.(*tomlValue) - tt.comment = opts.Comment - toInsert = tt + v.comment = opts.Comment + v.commented = opts.Commented + v.multiline = opts.Multiline + toInsert = v default: - toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline} + toInsert = &tomlValue{value: value, + comment: opts.Comment, + commented: opts.Commented, + multiline: opts.Multiline, + position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} } subtree.values[keys[len(keys)-1]] = toInsert @@ -256,44 +397,35 @@ func (t *Tree) SetPath(keys []string, value interface{}) { // SetPathWithComment is the same as SetPath, but allows you to provide comment // information to the key, that will be reused by Marshal(). func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTree() - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTree()) - } - subtree = node[len(node)-1] - } - } + t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) +} - var toInsert interface{} +// Delete removes a key from the tree. +// Key is a dot-separated path (e.g. a.b.c). +func (t *Tree) Delete(key string) error { + keys, err := parseKey(key) + if err != nil { + return err + } + return t.DeletePath(keys) +} - switch value.(type) { +// DeletePath removes a key from the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). 
+func (t *Tree) DeletePath(keys []string) error { + keyLen := len(keys) + if keyLen == 1 { + delete(t.values, keys[0]) + return nil + } + tree := t.GetPath(keys[:keyLen-1]) + item := keys[keyLen-1] + switch node := tree.(type) { case *Tree: - tt := value.(*Tree) - tt.comment = comment - toInsert = value - case []*Tree: - toInsert = value - case *tomlValue: - tt := value.(*tomlValue) - tt.comment = comment - toInsert = tt - default: - toInsert = &tomlValue{value: value, comment: comment, commented: commented} + delete(node.values, item) + return nil } - - subtree.values[keys[len(keys)-1]] = toInsert + return errors.New("no such key to delete") } // createSubTree takes a tree and a key and create the necessary intermediate @@ -305,11 +437,12 @@ func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, // Returns nil on success, error object on failure func (t *Tree) createSubTree(keys []string, pos Position) error { subtree := t - for _, intermediateKey := range keys { + for i, intermediateKey := range keys { nextTree, exists := subtree.values[intermediateKey] if !exists { - tree := newTree() + tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) tree.position = pos + tree.inline = subtree.inline subtree.values[intermediateKey] = tree nextTree = tree } @@ -337,10 +470,39 @@ func LoadBytes(b []byte) (tree *Tree, err error) { err = errors.New(r.(string)) } }() + + if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { + b = b[4:] + } else if len(b) >= 3 && hasUTF8BOM3(b) { + b = b[3:] + } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { + b = b[2:] + } + tree = parseToml(lexToml(b)) return } +func hasUTF16BigEndianBOM2(b []byte) bool { + return b[0] == 0xFE && b[1] == 0xFF +} + +func hasUTF16LittleEndianBOM2(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE +} + +func hasUTF8BOM3(b []byte) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +func hasUTF32BigEndianBOM4(b []byte) bool { + return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF +} + +func hasUTF32LittleEndianBOM4(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 +} + // LoadReader creates a Tree from any io.Reader. 
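The Delete/DeletePath pair implemented above gives Tree a way to drop keys after parsing, and the LoadBytes change in the same hunk strips UTF-8/16/32 byte-order marks before lexing. A minimal sketch of Delete, assuming the vendored github.com/pelletier/go-toml import path and the existing Load and Has helpers (the server table is illustrative only):

    package main

    import (
        "fmt"

        toml "github.com/pelletier/go-toml"
    )

    func main() {
        tree, err := toml.Load("[server]\nhost = \"localhost\"\nport = 8080\n")
        if err != nil {
            panic(err)
        }
        // Delete takes a dot-separated path, like Get and Set.
        if err := tree.Delete("server.port"); err != nil {
            panic(err)
        }
        fmt.Println(tree.Has("server.port"), tree.Has("server.host")) // false true
    }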
func LoadReader(reader io.Reader) (tree *Tree, err error) { inputBytes, err := ioutil.ReadAll(reader) diff --git a/registry-library/vendor/github.com/pelletier/go-toml/tomltree_create.go b/registry-library/vendor/github.com/pelletier/go-toml/tomltree_create.go index 79610e9b..80353500 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -57,6 +57,19 @@ func simpleValueCoercion(object interface{}) (interface{}, error) { return float64(original), nil case fmt.Stringer: return original.String(), nil + case []interface{}: + value := reflect.ValueOf(original) + length := value.Len() + arrayValue := reflect.MakeSlice(value.Type(), 0, length) + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return arrayValue.Interface(), nil default: return nil, fmt.Errorf("cannot convert type %T to Tree", object) } diff --git a/registry-library/vendor/github.com/pelletier/go-toml/tomltree_write.go b/registry-library/vendor/github.com/pelletier/go-toml/tomltree_write.go index e4049e29..ae6dac49 100644 --- a/registry-library/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ b/registry-library/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "math" + "math/big" "reflect" "sort" "strconv" @@ -12,26 +13,50 @@ import ( "time" ) +type valueComplexity int + +const ( + valueSimple valueComplexity = iota + 1 + valueComplex +) + +type sortNode struct { + key string + complexity valueComplexity +} + // Encodes a string to a TOML-compliant multi-line string value // This function is a clone of the existing encodeTomlString function, except that whitespace characters // are preserved. Quotation marks and backslashes are also not escaped. 
-func encodeMultilineTomlString(value string) string { +func encodeMultilineTomlString(value string, commented string) string { var b bytes.Buffer + adjacentQuoteCount := 0 - for _, rr := range value { + b.WriteString(commented) + for i, rr := range value { + if rr != '"' { + adjacentQuoteCount = 0 + } else { + adjacentQuoteCount++ + } switch rr { case '\b': b.WriteString(`\b`) case '\t': b.WriteString("\t") case '\n': - b.WriteString("\n") + b.WriteString("\n" + commented) case '\f': b.WriteString(`\f`) case '\r': b.WriteString("\r") case '"': - b.WriteString(`"`) + if adjacentQuoteCount >= 3 || i == len(value)-1 { + adjacentQuoteCount = 0 + b.WriteString(`\"`) + } else { + b.WriteString(`"`) + } case '\\': b.WriteString(`\`) default: @@ -78,7 +103,30 @@ func encodeTomlString(value string) string { return b.String() } -func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { +func tomlTreeStringRepresentation(t *Tree, ord marshalOrder) (string, error) { + var orderedVals []sortNode + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + var values []string + for _, node := range orderedVals { + k := node.key + v := t.values[k] + + repr, err := tomlValueStringRepresentation(v, "", "", ord, false) + if err != nil { + return "", err + } + values = append(values, quoteKeyIfNeeded(k)+" = "+repr) + } + return "{ " + strings.Join(values, ", ") + " }", nil +} + +func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord marshalOrder, arraysOneElementPerLine bool) (string, error) { // this interface check is added to dereference the change made in the writeTo function. // That change was made to allow this function to see formatting options. tv, ok := v.(*tomlValue) @@ -94,20 +142,28 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen case int64: return strconv.FormatInt(value, 10), nil case float64: - // Ensure a round float does contain a decimal point. Otherwise feeding - // the output back to the parser would convert to an integer. 
+ // Default bit length is full 64 + bits := 64 + // Float panics if nan is used + if !math.IsNaN(value) { + // if 32 bit accuracy is enough to exactly show, use 32 + _, acc := big.NewFloat(value).Float32() + if acc == big.Exact { + bits = 32 + } + } if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil case string: if tv.multiline { - return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil + return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil } return "\"" + encodeTomlString(value) + "\"", nil case []byte: b, _ := v.([]byte) - return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine) + return string(b), nil case bool: if value { return "true", nil @@ -115,6 +171,14 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen return "false", nil case time.Time: return value.Format(time.RFC3339), nil + case LocalDate: + return value.String(), nil + case LocalDateTime: + return value.String(), nil + case LocalTime: + return value.String(), nil + case *Tree: + return tomlTreeStringRepresentation(value, ord) case nil: return "", nil } @@ -125,7 +189,7 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen var values []string for i := 0; i < rv.Len(); i++ { item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine) + itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) if err != nil { return "", err } @@ -139,131 +203,252 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen for _, value := range values { stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(value) + stringBuffer.WriteString(commented + value) stringBuffer.WriteString(`,`) stringBuffer.WriteString("\n") } - stringBuffer.WriteString(indent + "]") + stringBuffer.WriteString(indent + commented + "]") return stringBuffer.String(), nil } - return "[" + strings.Join(values, ",") + "]", nil + return "[" + strings.Join(values, ", ") + "]", nil } return "", fmt.Errorf("unsupported value type %T: %v", v, v) } -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - simpleValuesKeys := make([]string, 0) - complexValuesKeys := make([]string, 0) +func getTreeArrayLine(trees []*Tree) (line int) { + // get lowest line number that is not 0 + for _, tv := range trees { + if tv.position.Line < line || line == 0 { + line = tv.position.Line + } + } + return +} + +func sortByLines(t *Tree) (vals []sortNode) { + var ( + line int + lines []int + tv *Tree + tom *tomlValue + node sortNode + ) + vals = make([]sortNode, 0) + m := make(map[int]sortNode) for k := range t.values { v := t.values[k] switch v.(type) { - case *Tree, []*Tree: - complexValuesKeys = append(complexValuesKeys, k) + case *Tree: + tv = v.(*Tree) + line = tv.position.Line + node = sortNode{key: k, complexity: valueComplex} + case []*Tree: + line = getTreeArrayLine(v.([]*Tree)) + node = sortNode{key: k, complexity: valueComplex} default: - simpleValuesKeys = append(simpleValuesKeys, k) + tom = v.(*tomlValue) + line = tom.position.Line + node = sortNode{key: k, complexity: valueSimple} } + 
lines = append(lines, line) + vals = append(vals, node) + m[line] = node } + sort.Ints(lines) - sort.Strings(simpleValuesKeys) - sort.Strings(complexValuesKeys) + for i, line := range lines { + vals[i] = m[line] + } - for _, k := range simpleValuesKeys { - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } + return vals +} - repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } +func sortAlphabetical(t *Tree) (vals []sortNode) { + var ( + node sortNode + simpVals []string + compVals []string + ) + vals = make([]sortNode, 0) + m := make(map[string]sortNode) - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, []*Tree: + node = sortNode{key: k, complexity: valueComplex} + compVals = append(compVals, node.key) + default: + node = sortNode{key: k, complexity: valueSimple} + simpVals = append(simpVals, node.key) } + vals = append(vals, node) + m[node.key] = node + } - var commented string - if v.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } + // Simples first to match previous implementation + sort.Strings(simpVals) + i := 0 + for _, key := range simpVals { + vals[i] = m[key] + i++ } - for _, k := range complexValuesKeys { - v := t.values[k] + sort.Strings(compVals) + for _, key := range compVals { + vals[i] = m[key] + i++ + } - combinedKey := k - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey - } - var commented string - if t.commented { - commented = "# " - } + return vals +} - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false) +} + +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, indentString string, parentCommented bool) (int64, error) { + var orderedVals []sortNode + + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + for _, node := range orderedVals { + switch node.complexity { + case valueComplex: + k := node.key + v := t.values[k] + + combinedKey := quoteKeyIfNeeded(k) + if keyspace != "" { + combinedKey = keyspace + "." 
+ combinedKey + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if parentCommented || t.commented || tv.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || tv.commented) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + var commented string + if parentCommented || t.commented || subTree.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || subTree.commented) + if err != nil { + return bytesCount, err + } + } + } + default: // Simple + k := node.key + v, ok := t.values[k].(*tomlValue) if !ok { return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + + var commented string + if parentCommented || t.commented || v.commented { + commented = "# " + } + repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) start := "# " if strings.HasPrefix(comment, "#") { start = "" } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") bytesCount += int64(writtenBytesCountComment) if errc != nil { return bytesCount, errc } } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + + quotedKey := quoteKeyIfNeeded(k) + writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err } - bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - } } } return bytesCount, nil } +// quote a key if it does not fit the bare key format (A-Za-z0-9_-) 
+// quoted keys use the same rules as strings +func quoteKeyIfNeeded(k string) string { + // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain + // keys that have already been quoted. + // not an ideal situation, but good enough of a stop gap. + if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { + return k + } + isBare := true + for _, r := range k { + if !isValidBareChar(r) { + isBare = false + break + } + } + if isBare { + return k + } + return quoteKey(k) +} + +func quoteKey(k string) string { + return "\"" + encodeTomlString(k) + "\"" +} + func writeStrings(w io.Writer, s ...string) (int, error) { var n int for i := range s { @@ -286,12 +471,11 @@ func (t *Tree) WriteTo(w io.Writer) (int64, error) { // Output spans multiple lines, and is suitable for ingest by a TOML parser. // If the conversion cannot be performed, ToString returns a non-nil error. func (t *Tree) ToTomlString() (string, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) + b, err := t.Marshal() if err != nil { return "", err } - return buf.String(), nil + return string(b), nil } // String generates a human-readable representation of the current tree. diff --git a/registry-library/vendor/github.com/sirupsen/logrus/.travis.yml b/registry-library/vendor/github.com/sirupsen/logrus/.travis.yml index 5e20aa41..c1dbd5a3 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/registry-library/vendor/github.com/sirupsen/logrus/.travis.yml @@ -4,14 +4,12 @@ git: depth: 1 env: - GO111MODULE=on -go: [1.13.x, 1.14.x] -os: [linux, osx] +go: 1.15.x +os: linux install: - ./travis/install.sh script: - - ./travis/cross_build.sh - - ./travis/lint.sh - - export GOMAXPROCS=4 - - export GORACE=halt_on_error=1 - - go test -race -v ./... - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/registry-library/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/registry-library/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 584026d6..7567f612 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/registry-library/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,39 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. 
+ +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes around the new go 1.13 error formatting system + * Fix for a long-standing race condition with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency toward a windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions has been added + # 1.6.0 Fixes: * end of line cleanup diff --git a/registry-library/vendor/github.com/sirupsen/logrus/README.md b/registry-library/vendor/github.com/sirupsen/logrus/README.md index 5796706d..5152b6aa 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/README.md +++ b/registry-library/vendor/github.com/sirupsen/logrus/README.md @@ -402,7 +402,7 @@ func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { // source of the official loggers. serialized, err := json.Marshal(entry.Data) if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) } return append(serialized, '\n'), nil } diff --git a/registry-library/vendor/github.com/sirupsen/logrus/entry.go b/registry-library/vendor/github.com/sirupsen/logrus/entry.go index 5a5cbfe7..07a1e5fa 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/entry.go +++ b/registry-library/vendor/github.com/sirupsen/logrus/entry.go @@ -78,6 +78,14 @@ func NewEntry(logger *Logger) *Entry { } } +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + // Returns the bytes representation of this entry from the formatter. func (entry *Entry) Bytes() ([]byte, error) { return entry.Logger.Formatter.Format(entry) @@ -123,11 +131,9 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range fields { isErrField := false if t := reflect.TypeOf(v); t != nil { - switch t.Kind() { - case reflect.Func: + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: isErrField = true - case reflect.Ptr: - isErrField = t.Elem().Kind() == reflect.Func } } if isErrField { @@ -212,68 +218,72 @@ func (entry Entry) HasCaller() (has bool) { entry.Caller != nil } -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { +func (entry *Entry) log(level Level, msg string) { var buffer *bytes.Buffer - // Default to now, but allow users to override if they want. - // - // We don't have to worry about polluting future calls to Entry#log() - // with this assignment because this function is declared with a - // non-pointer receiver.
- if entry.Time.IsZero() { - entry.Time = time.Now() + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() } - entry.Level = level - entry.Message = msg - entry.Logger.mu.Lock() - if entry.Logger.ReportCaller { - entry.Caller = getCaller() + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + newEntry.Logger.mu.Unlock() + + if reportCaller { + newEntry.Caller = getCaller() } - entry.Logger.mu.Unlock() - entry.fireHooks() + newEntry.fireHooks() buffer = getBuffer() defer func() { - entry.Buffer = nil + newEntry.Buffer = nil putBuffer(buffer) }() buffer.Reset() - entry.Buffer = buffer + newEntry.Buffer = buffer - entry.write() + newEntry.write() - entry.Buffer = nil + newEntry.Buffer = nil // To avoid Entry#log() returning a value that only would make sense for // panic() to use in Entry#Panic(), we avoid the allocation by checking // directly here. if level <= PanicLevel { - panic(&entry) + panic(newEntry) } } func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, entry) + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } } func (entry *Entry) write() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - if _, err = entry.Logger.Out.Write(serialized); err != nil { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } @@ -319,7 +329,6 @@ func (entry *Entry) Fatal(args ...interface{}) { func (entry *Entry) Panic(args ...interface{}) { entry.Log(PanicLevel, args...) 
- panic(fmt.Sprint(args...)) } // Entry Printf family functions diff --git a/registry-library/vendor/github.com/sirupsen/logrus/go.sum b/registry-library/vendor/github.com/sirupsen/logrus/go.sum index 1edc143b..694c18b8 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/go.sum +++ b/registry-library/vendor/github.com/sirupsen/logrus/go.sum @@ -4,7 +4,5 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/registry-library/vendor/github.com/sirupsen/logrus/json_formatter.go b/registry-library/vendor/github.com/sirupsen/logrus/json_formatter.go index ba7f2371..c96dc563 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/registry-library/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -23,6 +23,9 @@ func (f FieldMap) resolve(key fieldKey) string { // JSONFormatter formats logs into parsable json type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. TimestampFormat string // DisableTimestamp allows disabling automatic timestamps in output @@ -118,7 +121,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { encoder.SetIndent("", " ") } if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) } return b.Bytes(), nil diff --git a/registry-library/vendor/github.com/sirupsen/logrus/logger.go b/registry-library/vendor/github.com/sirupsen/logrus/logger.go index dbf627c9..33770445 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/logger.go +++ b/registry-library/vendor/github.com/sirupsen/logrus/logger.go @@ -12,7 +12,7 @@ import ( // LogFunction For big messages, it can be more efficient to pass a function // and only call it if the log level is actually enables rather than // generating the log message and then checking if the level is enabled -type LogFunction func()[]interface{} +type LogFunction func() []interface{} type Logger struct { // The logs are `io.Copy`'d to this in a mutex. 
It's common to set this to a diff --git a/registry-library/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/registry-library/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index cc4fe6e3..04748b85 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/registry-library/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,4 +1,4 @@ -// +build linux aix +// +build linux aix zos // +build !js package logrus diff --git a/registry-library/vendor/github.com/sirupsen/logrus/text_formatter.go b/registry-library/vendor/github.com/sirupsen/logrus/text_formatter.go index 3c28b54c..be2c6efe 100644 --- a/registry-library/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/registry-library/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -53,7 +53,10 @@ type TextFormatter struct { // the time passed since beginning of execution. FullTimestamp bool - // TimestampFormat to use for display when a full timestamp is printed + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. TimestampFormat string // The fields are sorted by default for a consistent output. For applications @@ -235,6 +238,8 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelColor = yellow case ErrorLevel, FatalLevel, PanicLevel: levelColor = red + case InfoLevel: + levelColor = blue default: levelColor = blue } diff --git a/registry-library/vendor/google.golang.org/grpc/codes/codes.go b/registry-library/vendor/google.golang.org/grpc/codes/codes.go index 02738839..11b10618 100644 --- a/registry-library/vendor/google.golang.org/grpc/codes/codes.go +++ b/registry-library/vendor/google.golang.org/grpc/codes/codes.go @@ -33,6 +33,9 @@ const ( OK Code = 0 // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. Canceled Code = 1 // Unknown error. An example of where this error may be returned is @@ -40,12 +43,17 @@ const ( // an error-space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. Unknown Code = 2 // InvalidArgument indicates client specified an invalid argument. // Note that this differs from FailedPrecondition. It indicates arguments // that are problematic regardless of the state of the system // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. InvalidArgument Code = 3 // DeadlineExceeded means operation expired before completion. @@ -53,14 +61,21 @@ const ( // returned even if the operation has completed successfully. For // example, a successful response from a server could have been delayed // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. DeadlineExceeded Code = 4 // NotFound means some requested entity (e.g., file or directory) was // not found. + // + // This error code will not be generated by the gRPC framework. NotFound Code = 5 // AlreadyExists means an attempt to create an entity failed because one // already exists. 
+ // + // This error code will not be generated by the gRPC framework. AlreadyExists Code = 6 // PermissionDenied indicates the caller does not have permission to @@ -69,10 +84,17 @@ const ( // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. PermissionDenied Code = 7 // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. ResourceExhausted Code = 8 // FailedPrecondition indicates operation was rejected because the @@ -94,6 +116,8 @@ const ( // REST Get/Update/Delete on a resource and the resource on the // server does not match the condition. E.g., conflicting // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. FailedPrecondition Code = 9 // Aborted indicates the operation was aborted, typically due to a @@ -102,6 +126,8 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. Aborted Code = 10 // OutOfRange means operation was attempted past the valid range. @@ -119,15 +145,26 @@ const ( // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. + // + // This error code will not be generated by the gRPC framework. OutOfRange Code = 11 // Unimplemented indicates operation is not implemented or not // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. Unimplemented Code = 12 // Internal errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. Internal Code = 13 // Unavailable indicates the service is currently unavailable. @@ -137,13 +174,22 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. Unavailable Code = 14 // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. DataLoss Code = 15 // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. 
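+ //
+ // For example, a client can branch on this code after a failed RPC.
+ // A minimal sketch (status.Code is from google.golang.org/grpc/status;
+ // refreshCredentials is a hypothetical application helper):
+ //
+ //	if status.Code(err) == codes.Unauthenticated {
+ //		refreshCredentials()
+ //	}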
Unauthenticated Code = 16 _maxCode = 17 diff --git a/registry-library/vendor/google.golang.org/grpc/connectivity/connectivity.go b/registry-library/vendor/google.golang.org/grpc/connectivity/connectivity.go deleted file mode 100644 index 34ec36fb..00000000 --- a/registry-library/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package connectivity defines connectivity semantics. -// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. -package connectivity - -import ( - "context" - - "google.golang.org/grpc/grpclog" -) - -// State indicates the state of connectivity. -// It can be the state of a ClientConn or SubConn. -type State int - -func (s State) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - grpclog.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" - } -} - -const ( - // Idle indicates the ClientConn is idle. - Idle State = iota - // Connecting indicates the ClientConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -// Reporter reports the connectivity states. -type Reporter interface { - // CurrentState returns the current state of the reporter. - CurrentState() State - // WaitForStateChange blocks until the reporter's state is different from the given state, - // and returns true. - // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). - WaitForStateChange(context.Context, State) bool -} diff --git a/registry-library/vendor/google.golang.org/grpc/grpclog/grpclog.go b/registry-library/vendor/google.golang.org/grpc/grpclog/grpclog.go deleted file mode 100644 index 874ea6d9..00000000 --- a/registry-library/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ /dev/null @@ -1,126 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpclog defines logging for grpc. 
-// -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" - -import "os" - -var logger = newLoggerV2() - -// V reports whether verbosity level l is at least the requested verbose level. -func V(l int) bool { - return logger.V(l) -} - -// Info logs to the INFO log. -func Info(args ...interface{}) { - logger.Info(args...) -} - -// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) -} - -// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { - logger.Infoln(args...) -} - -// Warning logs to the WARNING log. -func Warning(args ...interface{}) { - logger.Warning(args...) -} - -// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) -} - -// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { - logger.Warningln(args...) -} - -// Error logs to the ERROR log. -func Error(args ...interface{}) { - logger.Error(args...) -} - -// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) -} - -// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { - logger.Errorln(args...) -} - -// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. -// It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { - logger.Fatal(args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. -// It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -// -// Deprecated: use Info. -func Print(args ...interface{}) { - logger.Info(args...) -} - -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. -// -// Deprecated: use Infof. -func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) -} - -// Println prints to the logger. Arguments are handled in the manner of fmt.Println. -// -// Deprecated: use Infoln. -func Println(args ...interface{}) { - logger.Infoln(args...) -} diff --git a/registry-library/vendor/google.golang.org/grpc/grpclog/logger.go b/registry-library/vendor/google.golang.org/grpc/grpclog/logger.go deleted file mode 100644 index 097494f7..00000000 --- a/registry-library/vendor/google.golang.org/grpc/grpclog/logger.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclog - -// Logger mimics golang's standard Logger as an interface. -// -// Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) -} - -// SetLogger sets the logger that is used in grpc. Call only from -// init() functions. -// -// Deprecated: use SetLoggerV2. -func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true -} diff --git a/registry-library/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/registry-library/vendor/google.golang.org/grpc/grpclog/loggerv2.go deleted file mode 100644 index d4932577..00000000 --- a/registry-library/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ /dev/null @@ -1,195 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclog - -import ( - "io" - "io/ioutil" - "log" - "os" - "strconv" -) - -// LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. 
- Infof(format string, args ...interface{}) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} - -// SetLoggerV2 sets logger that is used in grpc to a V2 logger. -// Not mutex-protected, should be called before any gRPC functions. -func SetLoggerV2(l LoggerV2) { - logger = l -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int -} - -// NewLoggerV2 creates a loggerV2 with the provided writers. -// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). -// Error logs will be written to errorW, warningW and infoW. -// Warning logs will be written to warningW and infoW. -// Info logs will be written to infoW. -func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) -} - -// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and -// verbosity level. -func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} -} - -// newLoggerV2 creates a loggerV2 to be used as default logger. -// All logs are written to stderr. 
-func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard - - logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") - switch logLevel { - case "", "ERROR", "error": // If env is unset, set level to ERROR. - errorW = os.Stderr - case "WARNING", "warning": - warningW = os.Stderr - case "INFO", "info": - infoW = os.Stderr - } - - var v int - vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") - if vl, err := strconv.Atoi(vLevel); err == nil { - v = vl - } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) -} - -func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) -} - -func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) -} - -func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) -} - -func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) -} - -func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) -} - -func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) -} - -func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) -} - -func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) -} - -func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) -} - -func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) V(l int) bool { - return l <= g.v -} diff --git a/registry-library/vendor/google.golang.org/grpc/internal/internal.go b/registry-library/vendor/google.golang.org/grpc/internal/internal.go deleted file mode 100644 index 0912f0bf..00000000 --- a/registry-library/vendor/google.golang.org/grpc/internal/internal.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains gRPC-internal code, to avoid polluting -// the godoc of the top-level grpc package. It must not import any grpc -// symbols to avoid circular dependencies. -package internal - -import ( - "context" - "time" - - "google.golang.org/grpc/connectivity" -) - -var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption - // HealthCheckFunc is used to provide client-side LB channel health checking - HealthCheckFunc HealthChecker - // BalancerUnregister is exported by package balancer to unregister a balancer. 
- BalancerUnregister func(name string) - // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by - // default, but tests may wish to set it lower for convenience. - KeepaliveMinPingTime = 10 * time.Second - // StatusRawProto is exported by status/status.go. This func returns a - // pointer to the wrapped Status proto for a given status.Status without a - // call to proto.Clone(). The returned Status proto should not be mutated by - // the caller. - StatusRawProto interface{} // func (*status.Status) *spb.Status - // NewRequestInfoContext creates a new context based on the argument context attaching - // the passed in RequestInfo to the new context. - NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult -) - -// HealthChecker defines the signature of the client-side LB channel health checking function. -// -// The implementation is expected to create a health checking RPC stream by -// calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). -// -// The health checking protocol is defined at: -// https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error - -const ( - // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. - CredsBundleModeFallback = "fallback" - // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer - // mode. - CredsBundleModeBalancer = "balancer" - // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode - // that supports backend returned by grpclb balancer. - CredsBundleModeBackendFromBalancer = "backend-from-balancer" -) diff --git a/registry-library/vendor/google.golang.org/grpc/internal/status/status.go b/registry-library/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 00000000..710223b8 --- /dev/null +++ b/registry-library/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. 
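+//
+// For example, a minimal sketch of that invariant using the New and Err
+// functions defined below:
+//
+//	st := New(codes.NotFound, "entity missing")
+//	_ = st.Err()                // non-nil, because the code is NotFound
+//	_ = New(codes.OK, "").Err() // nil, because the code is OK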
+package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{e: s.Proto()} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + e *spb.Status +} + +func (e *Error) Error() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return FromProto(e.e) +} + +// Is implements future error.Is functionality. 
+// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.e, tse.e) +} diff --git a/registry-library/vendor/google.golang.org/grpc/status/status.go b/registry-library/vendor/google.golang.org/grpc/status/status.go index a1348e9b..01e182c3 100644 --- a/registry-library/vendor/google.golang.org/grpc/status/status.go +++ b/registry-library/vendor/google.golang.org/grpc/status/status.go @@ -29,88 +29,23 @@ package status import ( "context" - "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/status" ) -func init() { - internal.StatusRawProto = statusRawProto -} - -func statusRawProto(s *Status) *spb.Status { return s.s } - -// statusError is an alias of a status proto. It implements error and Status, -// and a nil statusError should never be returned by this package. -type statusError spb.Status - -func (se *statusError) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) -} - -func (se *statusError) GRPCStatus() *Status { - return &Status{s: (*spb.Status)(se)} -} - -// Is implements future error.Is functionality. -// A statusError is equivalent if the code and message are identical. -func (se *statusError) Is(target error) bool { - tse, ok := target.(*statusError) - if !ok { - return false - } - - return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) -} - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is -// OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return (*statusError)(s.s) -} +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} + return status.New(c, msg) } // Newf returns New(c, fmt.Sprintf(format, a...)). @@ -135,7 +70,7 @@ func ErrorProto(s *spb.Status) error { // FromProto returns a Status representing s. 
func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} + return status.FromProto(s) } // FromError returns a Status representing err if it was produced from this @@ -160,42 +95,6 @@ func Convert(err error) *Status { return s } -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - // Code returns the Code of the error if it is a Status error, codes.OK if err // is nil, or codes.Unknown otherwise. func Code(err error) codes.Code { diff --git a/registry-library/vendor/modules.txt b/registry-library/vendor/modules.txt index 55ab5225..863403c8 100644 --- a/registry-library/vendor/modules.txt +++ b/registry-library/vendor/modules.txt @@ -1,8 +1,11 @@ -# github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 +# github.com/Microsoft/go-winio v0.4.17 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/pkg/guid -# github.com/Microsoft/hcsshim v0.8.7 +github.com/Microsoft/go-winio/pkg/security +github.com/Microsoft/go-winio/vhd +# github.com/Microsoft/hcsshim v0.8.16 github.com/Microsoft/hcsshim +github.com/Microsoft/hcsshim/computestorage github.com/Microsoft/hcsshim/internal/cow github.com/Microsoft/hcsshim/internal/hcs github.com/Microsoft/hcsshim/internal/hcserror @@ -19,10 +22,11 @@ github.com/Microsoft/hcsshim/internal/schema2 github.com/Microsoft/hcsshim/internal/timeout github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer +github.com/Microsoft/hcsshim/internal/winapi github.com/Microsoft/hcsshim/osversion -# github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f +# github.com/containerd/cgroups v1.0.1 github.com/containerd/cgroups/stats/v1 -# github.com/containerd/containerd v1.4.3 +# github.com/containerd/containerd v1.5.2 ## explicit github.com/containerd/containerd/archive/compression github.com/containerd/containerd/content @@ -32,19 +36,16 @@ github.com/containerd/containerd/filters github.com/containerd/containerd/images github.com/containerd/containerd/labels github.com/containerd/containerd/log +github.com/containerd/containerd/pkg/userns github.com/containerd/containerd/platforms github.com/containerd/containerd/reference github.com/containerd/containerd/remotes github.com/containerd/containerd/remotes/docker +github.com/containerd/containerd/remotes/docker/auth github.com/containerd/containerd/remotes/docker/schema1 +github.com/containerd/containerd/remotes/errors 
github.com/containerd/containerd/sys github.com/containerd/containerd/version -# github.com/deislabs/oras v0.8.1 -## explicit -github.com/deislabs/oras/pkg/artifact -github.com/deislabs/oras/pkg/content -github.com/deislabs/oras/pkg/context -github.com/deislabs/oras/pkg/oras # github.com/devfile/registry-support/index/generator v0.0.0-20211012185733-0a73f866043f ## explicit github.com/devfile/registry-support/index/generator/schema @@ -89,6 +90,12 @@ github.com/hashicorp/hcl/json/token github.com/inconshreveable/mousetrap # github.com/json-iterator/go v1.1.11 github.com/json-iterator/go +# github.com/klauspost/compress v1.11.13 +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/snappy +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/magiconair/properties v1.8.1 github.com/magiconair/properties # github.com/mitchellh/go-homedir v1.1.0 @@ -96,20 +103,22 @@ github.com/magiconair/properties github.com/mitchellh/go-homedir # github.com/mitchellh/mapstructure v1.1.2 github.com/mitchellh/mapstructure +# github.com/moby/locker v1.0.1 +github.com/moby/locker # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 -# github.com/opencontainers/go-digest v1.0.0-rc1 +# github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/pelletier/go-toml v1.2.0 +# github.com/pelletier/go-toml v1.8.1 github.com/pelletier/go-toml # github.com/pkg/errors v0.9.1 github.com/pkg/errors -# github.com/sirupsen/logrus v1.7.0 +# github.com/sirupsen/logrus v1.8.1 github.com/sirupsen/logrus # github.com/spf13/afero v1.2.2 github.com/spf13/afero @@ -140,7 +149,7 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -# golang.org/x/sync v0.0.0-20201207232520-09787c993a3a +# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sync/errgroup golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c @@ -154,11 +163,9 @@ golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm # google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.27.1 +# google.golang.org/grpc v1.33.2 google.golang.org/grpc/codes -google.golang.org/grpc/connectivity -google.golang.org/grpc/grpclog -google.golang.org/grpc/internal +google.golang.org/grpc/internal/status google.golang.org/grpc/status # google.golang.org/protobuf v1.26.0 google.golang.org/protobuf/encoding/prototext @@ -227,5 +234,13 @@ k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/klog/v2 # k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 k8s.io/utils/pointer +# oras.land/oras-go v0.4.0 +## explicit +oras.land/oras-go/pkg/artifact +oras.land/oras-go/pkg/content +oras.land/oras-go/pkg/context +oras.land/oras-go/pkg/oras +# rsc.io/letsencrypt v0.0.3 +## explicit # sigs.k8s.io/structured-merge-diff/v4 v4.1.2 sigs.k8s.io/structured-merge-diff/v4/value diff --git a/registry-library/vendor/github.com/deislabs/oras/LICENSE b/registry-library/vendor/oras.land/oras-go/LICENSE similarity index 98% rename from registry-library/vendor/github.com/deislabs/oras/LICENSE rename to registry-library/vendor/oras.land/oras-go/LICENSE 
index 21071075..42a4c91d 100644 --- a/registry-library/vendor/github.com/deislabs/oras/LICENSE +++ b/registry-library/vendor/oras.land/oras-go/LICENSE @@ -18,4 +18,5 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + SOFTWARE. + diff --git a/registry-library/vendor/oras.land/oras-go/pkg/artifact/consts.go b/registry-library/vendor/oras.land/oras-go/pkg/artifact/consts.go new file mode 100644 index 00000000..92934872 --- /dev/null +++ b/registry-library/vendor/oras.land/oras-go/pkg/artifact/consts.go @@ -0,0 +1,22 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package artifact + +const ( + // UnknownConfigMediaType is the default mediaType used when no + // config media type is specified. + UnknownConfigMediaType = "application/vnd.unknown.config.v1+json" +) diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/consts.go b/registry-library/vendor/oras.land/oras-go/pkg/content/consts.go new file mode 100644 index 00000000..ae59fdaa --- /dev/null +++ b/registry-library/vendor/oras.land/oras-go/pkg/content/consts.go @@ -0,0 +1,57 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +const ( + // DefaultBlobMediaType specifies the default blob media type + DefaultBlobMediaType = ocispec.MediaTypeImageLayer + // DefaultBlobDirMediaType specifies the default blob directory media type + DefaultBlobDirMediaType = ocispec.MediaTypeImageLayerGzip +) + +const ( + // TempFilePattern specifies the pattern to create temporary files + TempFilePattern = "oras" +) + +const ( + // AnnotationDigest is the annotation key for the digest of the uncompressed content + AnnotationDigest = "io.deis.oras.content.digest" + // AnnotationUnpack is the annotation key for indication of unpacking + AnnotationUnpack = "io.deis.oras.content.unpack" +) + +const ( + // OCIImageIndexFile is the file name of the index from the OCI Image Layout Specification + // Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md#indexjson-file + OCIImageIndexFile = "index.json" +) + +const ( + // DefaultBlocksize is the default size of each slice of bytes read in each write through in gunzip and untar.
+ // Simply uses the same size as io.Copy() + DefaultBlocksize = 32768 +) + +const ( + // what you get for a blank digest + BlankHash = digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") +) diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/decompressstore.go b/registry-library/vendor/oras.land/oras-go/pkg/content/decompressstore.go new file mode 100644 index 00000000..3912fc2f --- /dev/null +++ b/registry-library/vendor/oras.land/oras-go/pkg/content/decompressstore.go @@ -0,0 +1,158 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "context" + "errors" + "strings" + + ctrcontent "github.com/containerd/containerd/content" +) + +// DecompressStore store to decompress content and extract from tar, if needed, wrapping +// another store. By default, a FileStore will simply take each artifact and write it to +// a file, as a MemoryStore will do into memory. If the artifact is gzipped or tarred, +// you might want to store the actual object inside tar or gzip. Wrap your Store +// with DecompressStore, and it will check the media-type and, if relevant, +// gunzip and/or untar. +// +// For example: +// +// fileStore := NewFileStore(rootPath) +// decompressStore := store.NewDecompressStore(fileStore, WithBlocksize(blocksize)) +// +// The above example works if there is no tar, i.e. each artifact is just a single file, perhaps gzipped, +// or if there is only one file in each tar archive. In other words, when each content.Writer has only one target output stream. +// However, if you have multiple files in each tar archive, each archive of which is an artifact layer, then +// you need a way to select how to handle each file in the tar archive. In other words, when each content.Writer has more than one +// target output stream. 
In that case, use the following example: +// +// multiStore := NewMultiStore(rootPath) // some store that can handle different filenames +// decompressStore := store.NewDecompressStore(multiStore, WithBlocksize(blocksize), WithMultiWriterIngester()) +// +type DecompressStore struct { + ingester ctrcontent.Ingester + blocksize int + multiWriterIngester bool +} + +func NewDecompressStore(ingester ctrcontent.Ingester, opts ...WriterOpt) DecompressStore { + // we have to reprocess the opts to find the blocksize + var wOpts WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + // TODO: we probably should handle errors here + continue + } + } + + return DecompressStore{ingester, wOpts.Blocksize, wOpts.MultiWriterIngester} +} + +// Writer get a writer +func (d DecompressStore) Writer(ctx context.Context, opts ...ctrcontent.WriterOpt) (ctrcontent.Writer, error) { + // the logic is straightforward: + // - if there is a desc in the opts, and the mediatype is tar or tar+gzip, then pass the correct decompress writer + // - else, pass the regular writer + var ( + writer ctrcontent.Writer + err error + multiIngester MultiWriterIngester + ok bool + ) + + // check to see if we are supposed to use a MultiWriterIngester + if d.multiWriterIngester { + multiIngester, ok = d.ingester.(MultiWriterIngester) + if !ok { + return nil, errors.New("configured to use multiwriter ingester, but ingester does not implement multiwriter") + } + } + + // we have to reprocess the opts to find the desc + var wOpts ctrcontent.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + // figure out if compression and/or archive exists + desc := wOpts.Desc + // before we pass it down, we need to strip anything we are removing here + // and possibly update the digest, since the store indexes things by digest + hasGzip, hasTar, modifiedMediaType := checkCompression(desc.MediaType) + wOpts.Desc.MediaType = modifiedMediaType + opts = append(opts, ctrcontent.WithDescriptor(wOpts.Desc)) + // determine if we pass it blocksize, only if positive + writerOpts := []WriterOpt{} + if d.blocksize > 0 { + writerOpts = append(writerOpts, WithBlocksize(d.blocksize)) + } + + writer, err = d.ingester.Writer(ctx, opts...) + if err != nil { + return nil, err + } + + // do we need to wrap with an untar writer? + if hasTar { + // if not multiingester, get a regular writer + if multiIngester == nil { + writer = NewUntarWriter(writer, writerOpts...) + } else { + writers, err := multiIngester.Writers(ctx, opts...) + if err != nil { + return nil, err + } + writer = NewUntarWriterByName(writers, writerOpts...) + } + } + if hasGzip { + if writer == nil { + writer, err = d.ingester.Writer(ctx, opts...) + if err != nil { + return nil, err + } + } + writer = NewGunzipWriter(writer, writerOpts...) + } + return writer, nil +} + +// checkCompression check if the mediatype uses gzip compression or tar. +// Returns if it has gzip and/or tar, as well as the base media type without +// those suffixes. 
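+//
+// For example, a sketch of the expected results given the suffix handling
+// below:
+//
+//	gzip, tar, mt := checkCompression("application/vnd.oci.image.layer.v1.tar+gzip")
+//	// gzip == true, tar == true, mt == "application/vnd.oci.image.layer.v1"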
+func checkCompression(mediaType string) (gzip, tar bool, mt string) { + mt = mediaType + gzipSuffix := "+gzip" + gzipAltSuffix := ".gzip" + tarSuffix := ".tar" + switch { + case strings.HasSuffix(mt, gzipSuffix): + mt = mt[:len(mt)-len(gzipSuffix)] + gzip = true + case strings.HasSuffix(mt, gzipAltSuffix): + mt = mt[:len(mt)-len(gzipAltSuffix)] + gzip = true + } + + if strings.HasSuffix(mt, tarSuffix) { + mt = mt[:len(mt)-len(tarSuffix)] + tar = true + } + return +} diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/errors.go b/registry-library/vendor/oras.land/oras-go/pkg/content/errors.go new file mode 100644 index 00000000..c004ab8d --- /dev/null +++ b/registry-library/vendor/oras.land/oras-go/pkg/content/errors.go @@ -0,0 +1,32 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import "errors" + +// Common errors +var ( + ErrNotFound = errors.New("not_found") + ErrNoName = errors.New("no_name") + ErrUnsupportedSize = errors.New("unsupported_size") + ErrUnsupportedVersion = errors.New("unsupported_version") +) + +// FileStore errors +var ( + ErrPathTraversalDisallowed = errors.New("path_traversal_disallowed") + ErrOverwriteDisallowed = errors.New("overwrite_disallowed") +) diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/content/file.go b/registry-library/vendor/oras.land/oras-go/pkg/content/file.go similarity index 86% rename from registry-library/vendor/github.com/deislabs/oras/pkg/content/file.go rename to registry-library/vendor/oras.land/oras-go/pkg/content/file.go index 77b13335..4c9f6286 100644 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/content/file.go +++ b/registry-library/vendor/oras.land/oras-go/pkg/content/file.go @@ -1,3 +1,18 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package content import ( @@ -31,19 +46,28 @@ type FileStore struct { // Reproducible enables stripping times from added files Reproducible bool - root string - descriptor *sync.Map // map[digest.Digest]ocispec.Descriptor - pathMap *sync.Map - tmpFiles *sync.Map + root string + descriptor *sync.Map // map[digest.Digest]ocispec.Descriptor + pathMap *sync.Map + tmpFiles *sync.Map + ignoreNoName bool } // NewFileStore creates a new file store -func NewFileStore(rootPath string) *FileStore { +func NewFileStore(rootPath string, opts ...WriterOpt) *FileStore { + // we have to process the opts to find if they told us to change defaults + wOpts := DefaultWriterOpts() + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + continue + } + } return &FileStore{ - root: rootPath, - descriptor: &sync.Map{}, - pathMap: &sync.Map{}, - tmpFiles: &sync.Map{}, + root: rootPath, + descriptor: &sync.Map{}, + pathMap: &sync.Map{}, + tmpFiles: &sync.Map{}, + ignoreNoName: wOpts.IgnoreNoName, } } @@ -198,7 +222,14 @@ func (s *FileStore) Writer(ctx context.Context, opts ...content.WriterOpt) (cont name, ok := ResolveName(desc) if !ok { - return nil, ErrNoName + // if we were not told to ignore NoName, then return an error + if !s.ignoreNoName { + return nil, ErrNoName + } + + // just return a nil writer - we do not want to calculate the hash, so just use + // whatever was passed in the descriptor + return NewIoContentWriter(ioutil.Discard, WithOutputHash(desc.Digest)), nil } path, err := s.resolveWritePath(name) if err != nil { diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/gunzip.go b/registry-library/vendor/oras.land/oras-go/pkg/content/gunzip.go new file mode 100644 index 00000000..65c4646f --- /dev/null +++ b/registry-library/vendor/oras.land/oras-go/pkg/content/gunzip.go @@ -0,0 +1,72 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "compress/gzip" + "fmt" + "io" + + "github.com/containerd/containerd/content" +) + +// NewGunzipWriter wraps a writer with a gunzip, so that the stream is gunzipped +// +// By default, it calculates the hash when writing. If the option `skipHash` is true, +// it will skip doing the hash. Skipping the hash is intended to be used only +// if you are confident about the validity of the data being passed to the writer, +// and wish to save on the hashing time.
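+//
+// A minimal usage sketch (fileWriter stands in for any containerd
+// content.Writer already obtained from a store):
+//
+//	gw := NewGunzipWriter(fileWriter, WithBlocksize(DefaultBlocksize))
+//	// bytes written to gw are gunzipped before reaching fileWriter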
+func NewGunzipWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
+		gr, err := gzip.NewReader(r)
+		if err != nil {
+			done <- fmt.Errorf("error creating gzip reader: %v", err)
+			return
+		}
+		// write out the uncompressed data
+		b := make([]byte, wOpts.Blocksize)
+		for {
+			var n int
+			n, err = gr.Read(b)
+			if err != nil && err != io.EOF {
+				err = fmt.Errorf("GunzipWriter data read error: %v", err)
+				break
+			}
+			l := n
+			if n > len(b) {
+				l = len(b)
+			}
+			if _, err2 := w.Write(b[:l]); err2 != nil {
+				err = fmt.Errorf("GunzipWriter: error writing to underlying writer: %v", err2)
+				break
+			}
+			if err == io.EOF {
+				// clear the error
+				err = nil
+				break
+			}
+		}
+		gr.Close()
+		done <- err
+	}, opts...)
+}
diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/interface.go b/registry-library/vendor/oras.land/oras-go/pkg/content/interface.go
new file mode 100644
index 00000000..b9daee43
--- /dev/null
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/interface.go
@@ -0,0 +1,24 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import "github.com/containerd/containerd/content"
+
+// ProvideIngester is the interface that groups the content Provider and Ingester interfaces.
+type ProvideIngester interface {
+	content.Provider
+	content.Ingester
+}
diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/iowriter.go b/registry-library/vendor/oras.land/oras-go/pkg/content/iowriter.go
new file mode 100644
index 00000000..e9e2ed8b
--- /dev/null
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/iowriter.go
@@ -0,0 +1,112 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+
+	"github.com/containerd/containerd/content"
+	"github.com/opencontainers/go-digest"
+)
+
+// IoContentWriter is a writer that wraps an io.Writer, so the results can be streamed to
+// an open io.Writer. For example, it can be used to pull a layer and write it to a file, or device.
+type IoContentWriter struct {
+	writer   io.Writer
+	digester digest.Digester
+	size     int64
+	hash     *digest.Digest
+}
+
+// NewIoContentWriter creates a new IoContentWriter.
+//
+// By default, it calculates the hash when writing. If an expected hash is provided
+// via `WithOutputHash`, it will skip doing the hash. Skipping the hash is intended only
+// for when you are confident about the validity of the data being passed to the writer,
+// and wish to save on the hashing time.
+func NewIoContentWriter(writer io.Writer, opts ...WriterOpt) content.Writer {
+	w := writer
+	if w == nil {
+		w = ioutil.Discard
+	}
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+	ioc := &IoContentWriter{
+		writer:   w,
+		digester: digest.Canonical.Digester(),
+		// we take the OutputHash, since the InputHash goes to the passthrough writer,
+		// which then passes the processed output to us
+		hash: wOpts.OutputHash,
+	}
+	return NewPassthroughWriter(ioc, func(r io.Reader, w io.Writer, done chan<- error) {
+		// write out the data to the io writer
+		var (
+			err error
+		)
+		// we could use io.Copy, but io.CopyBuffer with our own buffer lets the
+		// configured Blocksize take effect, rather than io.Copy's fixed internal
+		// buffer size, without the caller having to care about the difference
+		b := make([]byte, wOpts.Blocksize)
+		_, err = io.CopyBuffer(w, r, b)
+		done <- err
+	}, opts...)
+}
+
+func (w *IoContentWriter) Write(p []byte) (n int, err error) {
+	n, err = w.writer.Write(p)
+	if err != nil {
+		return 0, err
+	}
+	w.size += int64(n)
+	if w.hash == nil {
+		w.digester.Hash().Write(p[:n])
+	}
+	return
+}
+
+func (w *IoContentWriter) Close() error {
+	return nil
+}
+
+// Digest may return empty digest or panics until committed.
+func (w *IoContentWriter) Digest() digest.Digest {
+	return w.digester.Digest()
+}
+
+// Commit commits the blob (but no roll-back is guaranteed on an error).
+// size and expected can be zero-value when unknown.
+// Commit always closes the writer, even on error.
+// ErrAlreadyExists aborts the writer.
+func (w *IoContentWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	return nil
+}
+
+// Status returns the current state of write
+func (w *IoContentWriter) Status() (content.Status, error) {
+	return content.Status{}, nil
+}
+
+// Truncate updates the size of the target blob
+func (w *IoContentWriter) Truncate(size int64) error {
+	return nil
+}
diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/content/memory.go b/registry-library/vendor/oras.land/oras-go/pkg/content/memory.go
similarity index 90%
rename from registry-library/vendor/github.com/deislabs/oras/pkg/content/memory.go
rename to registry-library/vendor/oras.land/oras-go/pkg/content/memory.go
index 5c76553f..394a7a5a 100644
--- a/registry-library/vendor/github.com/deislabs/oras/pkg/content/memory.go
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/memory.go
@@ -1,3 +1,18 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package content
 
 import (
diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/multireader.go b/registry-library/vendor/oras.land/oras-go/pkg/content/multireader.go
new file mode 100644
index 00000000..d2cf924a
--- /dev/null
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/multireader.go
@@ -0,0 +1,55 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containerd/containerd/content"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// MultiReader is a store that reads content from multiple underlying stores. It finds the
+// content by asking each underlying store to find the content, which it does based on the hash.
+//
+// Example:
+//	fileStore := NewFileStore(rootPath)
+//	memoryStore := NewMemoryStore()
+//	// load up content in fileStore and memoryStore
+//	multiStore := MultiReader([]content.Provider{fileStore, memoryStore})
+//
+// You now can use multiStore anywhere that content.Provider is accepted
+type MultiReader struct {
+	stores []content.Provider
+}
+
+// AddStore adds a store to read from
+func (m *MultiReader) AddStore(store ...content.Provider) {
+	m.stores = append(m.stores, store...)
+}
+
+// ReaderAt gets a reader for the given descriptor
+func (m MultiReader) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+	for _, store := range m.stores {
+		r, err := store.ReaderAt(ctx, desc)
+		if r != nil && err == nil {
+			return r, nil
+		}
+	}
+	// we did not find any
+	return nil, fmt.Errorf("not found")
+}
diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/multiwriter.go b/registry-library/vendor/oras.land/oras-go/pkg/content/multiwriter.go
new file mode 100644
index 00000000..2813f54e
--- /dev/null
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/multiwriter.go
@@ -0,0 +1,38 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"context"
+
+	ctrcontent "github.com/containerd/containerd/content"
+)
+
+// MultiWriterIngester is an ingester that can provide a single writer or multiple writers for a single
+// descriptor. It is useful when the target of a descriptor can have multiple items within it, e.g. a layer
+// that is a tar file with multiple files, each of which should go to a different stream, some of which
+// should not be handled at all.
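+//
+// An illustrative sketch (mwi and ctx are hypothetical, not defined in this
+// package): ask the ingester for its writer factory, then fetch one writer
+// per name:
+//
+//	getWriter, err := mwi.Writers(ctx)
+//	w, err := getWriter("some-file.txt")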
+type MultiWriterIngester interface {
+	ctrcontent.Ingester
+	Writers(ctx context.Context, opts ...ctrcontent.WriterOpt) (func(string) (ctrcontent.Writer, error), error)
+}
diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/content/oci.go b/registry-library/vendor/oras.land/oras-go/pkg/content/oci.go
similarity index 86%
rename from registry-library/vendor/github.com/deislabs/oras/pkg/content/oci.go
rename to registry-library/vendor/oras.land/oras-go/pkg/content/oci.go
index 57c56ef1..4f1b1b32 100644
--- a/registry-library/vendor/github.com/deislabs/oras/pkg/content/oci.go
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/oci.go
@@ -1,3 +1,18 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package content
 
 import (
@@ -98,6 +113,8 @@ func (s *OCIStore) AddReference(name string, desc ocispec.Descriptor) {
 	}
 
 	if _, ok := s.nameMap[name]; ok {
+		s.nameMap[name] = desc
+
 		for i, ref := range s.index.Manifests {
 			if name == ref.Annotations[ocispec.AnnotationRefName] {
 				s.index.Manifests[i] = desc
@@ -107,11 +124,12 @@
 
 		// Process should not reach here.
 		// Fallthrough to `Add` scenario and recover.
+		s.index.Manifests = append(s.index.Manifests, desc)
+		return
 	}
 
 	s.index.Manifests = append(s.index.Manifests, desc)
 	s.nameMap[name] = desc
-	return
 }
 
 // DeleteReference deletes an reference from index.
diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/opts.go b/registry-library/vendor/oras.land/oras-go/pkg/content/opts.go
new file mode 100644
index 00000000..f6eaac69
--- /dev/null
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/opts.go
@@ -0,0 +1,117 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"errors"
+
+	"github.com/opencontainers/go-digest"
+)
+
+type WriterOpts struct {
+	InputHash           *digest.Digest
+	OutputHash          *digest.Digest
+	Blocksize           int
+	MultiWriterIngester bool
+	IgnoreNoName        bool
+}
+
+type WriterOpt func(*WriterOpts) error
+
+func DefaultWriterOpts() WriterOpts {
+	return WriterOpts{
+		InputHash:    nil,
+		OutputHash:   nil,
+		Blocksize:    DefaultBlocksize,
+		IgnoreNoName: true,
+	}
+}
+
+// WithInputHash provides the expected input hash to a writer. Writers
+// may suppress their own calculation of a hash on the stream, taking this
+// hash instead. If the Writer processes the data before passing it on to another
+// Writer layer, this is the hash of the *input* stream.
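+//
+// An illustrative call (f is a hypothetical io.Writer and precomputed a
+// hypothetical digest.Digest, neither defined in this package):
+//
+//	w := NewIoContentWriter(f, WithInputHash(precomputed))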
+//
+// To have a blank hash, use WithInputHash(BlankHash).
+func WithInputHash(hash digest.Digest) WriterOpt {
+	return func(w *WriterOpts) error {
+		w.InputHash = &hash
+		return nil
+	}
+}
+
+// WithOutputHash provides the expected output hash to a writer. Writers
+// may suppress their own calculation of a hash on the stream, taking this
+// hash instead. If the Writer processes the data before passing it on to another
+// Writer layer, this is the hash of the *output* stream.
+//
+// To have a blank hash, use WithOutputHash(BlankHash).
+func WithOutputHash(hash digest.Digest) WriterOpt {
+	return func(w *WriterOpts) error {
+		w.OutputHash = &hash
+		return nil
+	}
+}
+
+// WithBlocksize sets the blocksize used by the processor of data.
+// The default is DefaultBlocksize, which is the same as that used by io.Copy.
+// Includes a safety check to ensure the caller doesn't actively set it to <= 0.
+func WithBlocksize(blocksize int) WriterOpt {
+	return func(w *WriterOpts) error {
+		if blocksize <= 0 {
+			return errors.New("blocksize must be greater than 0")
+		}
+		w.Blocksize = blocksize
+		return nil
+	}
+}
+
+// WithMultiWriterIngester declares that the passed ingester also implements MultiWriterIngester
+// and should be used as such. If this is set to true, but the ingester does not
+// implement MultiWriterIngester, calling Writer should return an error.
+func WithMultiWriterIngester() WriterOpt {
+	return func(w *WriterOpts) error {
+		w.MultiWriterIngester = true
+		return nil
+	}
+}
+
+// WithErrorOnNoName is for ingesters that, when creating a Writer, do not return an error if
+// the descriptor does not have a valid name. Passing WithErrorOnNoName
+// tells the writer to return an error instead of passing the data to a nil writer.
+func WithErrorOnNoName() WriterOpt {
+	return func(w *WriterOpts) error {
+		w.IgnoreNoName = false
+		return nil
+	}
+}
+
+// WithIgnoreNoName is for ingesters that, when creating a Writer, return an error if
+// the descriptor does not have a valid name. Passing WithIgnoreNoName
+// tells the writer not to return an error, but rather to pass the data to a nil writer.
+//
+// Deprecated: Use WithErrorOnNoName
+func WithIgnoreNoName() WriterOpt {
+	return func(w *WriterOpts) error {
+		w.IgnoreNoName = true
+		return nil
+	}
+}
diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/passthrough.go b/registry-library/vendor/oras.land/oras-go/pkg/content/passthrough.go
new file mode 100644
index 00000000..d54c4256
--- /dev/null
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/passthrough.go
@@ -0,0 +1,286 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"context"
+	"errors"
+	"io"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/opencontainers/go-digest"
+)
+
+// PassthroughWriter takes an input stream and passes it through to an underlying writer,
+// while providing the ability to manipulate the stream before it gets passed through
+type PassthroughWriter struct {
+	writer           content.Writer
+	pipew            *io.PipeWriter
+	digester         digest.Digester
+	size             int64
+	underlyingWriter *underlyingWriter
+	reader           *io.PipeReader
+	hash             *digest.Digest
+	done             chan error
+}
+
+// NewPassthroughWriter creates a pass-through writer that allows for processing
+// the content via an arbitrary function. The function should do whatever processing it
+// wants, reading from the Reader to the Writer. When done, it must indicate via
+// sending an error or nil to the done channel.
+func NewPassthroughWriter(writer content.Writer, f func(r io.Reader, w io.Writer, done chan<- error), opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+
+	r, w := io.Pipe()
+	pw := &PassthroughWriter{
+		writer:   writer,
+		pipew:    w,
+		digester: digest.Canonical.Digester(),
+		underlyingWriter: &underlyingWriter{
+			writer:   writer,
+			digester: digest.Canonical.Digester(),
+			hash:     wOpts.OutputHash,
+		},
+		reader: r,
+		hash:   wOpts.InputHash,
+		done:   make(chan error, 1),
+	}
+	go f(r, pw.underlyingWriter, pw.done)
+	return pw
+}
+
+func (pw *PassthroughWriter) Write(p []byte) (n int, err error) {
+	n, err = pw.pipew.Write(p)
+	if pw.hash == nil {
+		pw.digester.Hash().Write(p[:n])
+	}
+	pw.size += int64(n)
+	return
+}
+
+func (pw *PassthroughWriter) Close() error {
+	if pw.pipew != nil {
+		pw.pipew.Close()
+	}
+	pw.writer.Close()
+	return nil
+}
+
+// Digest may return empty digest or panics until committed.
+func (pw *PassthroughWriter) Digest() digest.Digest {
+	if pw.hash != nil {
+		return *pw.hash
+	}
+	return pw.digester.Digest()
+}
+
+// Commit commits the blob (but no roll-back is guaranteed on an error).
+// size and expected can be zero-value when unknown.
+// Commit always closes the writer, even on error.
+// ErrAlreadyExists aborts the writer.
+func (pw *PassthroughWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	if pw.pipew != nil {
+		pw.pipew.Close()
+	}
+	err := <-pw.done
+	if pw.reader != nil {
+		pw.reader.Close()
+	}
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// Some underlying writers will validate an expected digest, so we need the option to pass it
+	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
+	return pw.writer.Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
+}
+
+// Status returns the current state of write
+func (pw *PassthroughWriter) Status() (content.Status, error) {
+	return pw.writer.Status()
+}
+
+// Truncate updates the size of the target blob
+func (pw *PassthroughWriter) Truncate(size int64) error {
+	return pw.writer.Truncate(size)
+}
+
+// underlyingWriter is an implementation of io.Writer that writes to the
+// underlying content.Writer
+type underlyingWriter struct {
+	writer   content.Writer
+	digester digest.Digester
+	size     int64
+	hash     *digest.Digest
+}
+
+// Write writes to the underlying writer
+func (u *underlyingWriter) Write(p []byte) (int, error) {
+	n, err := u.writer.Write(p)
+	if err != nil {
+		return 0, err
+	}
+
+	if u.hash == nil {
+		u.digester.Hash().Write(p)
+	}
+	u.size += int64(len(p))
+	return n, nil
+}
+
+// Size returns the total size written
+func (u *underlyingWriter) Size() int64 {
+	return u.size
+}
+
+// Digest may return empty digest or panics until committed.
+func (u *underlyingWriter) Digest() digest.Digest {
+	if u.hash != nil {
+		return *u.hash
+	}
+	return u.digester.Digest()
+}
+
+// PassthroughMultiWriter is a single writer that passes through to multiple writers, allowing
+// the passthrough function to select which writer to use.
+type PassthroughMultiWriter struct {
+	writers   []*PassthroughWriter
+	pipew     *io.PipeWriter
+	digester  digest.Digester
+	size      int64
+	reader    *io.PipeReader
+	hash      *digest.Digest
+	done      chan error
+	startedAt time.Time
+	updatedAt time.Time
+}
+
+func NewPassthroughMultiWriter(writers func(name string) (content.Writer, error), f func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error), opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+
+	r, w := io.Pipe()
+
+	pmw := &PassthroughMultiWriter{
+		startedAt: time.Now(),
+		updatedAt: time.Now(),
+		done:      make(chan error, 1),
+		digester:  digest.Canonical.Digester(),
+		hash:      wOpts.InputHash,
+		pipew:     w,
+		reader:    r,
+	}
+
+	// get our output writers
+	getwriter := func(name string) io.Writer {
+		writer, err := writers(name)
+		if err != nil || writer == nil {
+			return nil
+		}
+		pw := &PassthroughWriter{
+			writer:   writer,
+			digester: digest.Canonical.Digester(),
+			underlyingWriter: &underlyingWriter{
+				writer:   writer,
+				digester: digest.Canonical.Digester(),
+				hash:     wOpts.OutputHash,
+			},
+			done: make(chan error, 1),
+		}
+		pmw.writers = append(pmw.writers, pw)
+		return pw.underlyingWriter
+	}
+	go f(r, getwriter, pmw.done)
+	return pmw
+}
+
+func (pmw *PassthroughMultiWriter) Write(p []byte) (n int, err error) {
+	n, err = pmw.pipew.Write(p)
+	if pmw.hash == nil {
+		pmw.digester.Hash().Write(p[:n])
+	}
+	pmw.size += int64(n)
+	pmw.updatedAt = time.Now()
+	return
+}
+
+func (pmw *PassthroughMultiWriter) Close() error {
+	pmw.pipew.Close()
+	for _, w := range pmw.writers {
+		w.Close()
+	}
+	return nil
+}
+
+// Digest may return empty digest or panics until committed.
+func (pmw *PassthroughMultiWriter) Digest() digest.Digest {
+	if pmw.hash != nil {
+		return *pmw.hash
+	}
+	return pmw.digester.Digest()
+}
+
+// Commit commits the blob (but no roll-back is guaranteed on an error).
+// size and expected can be zero-value when unknown.
+// Commit always closes the writer, even on error.
+// ErrAlreadyExists aborts the writer.
+func (pmw *PassthroughMultiWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	pmw.pipew.Close()
+	err := <-pmw.done
+	if pmw.reader != nil {
+		pmw.reader.Close()
+	}
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// Some underlying writers will validate an expected digest, so we need the option to pass it
+	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
+	for _, w := range pmw.writers {
+		// maybe this should be Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
+		w.done <- err
+		if err := w.Commit(ctx, size, expected, opts...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Status returns the current state of write
+func (pmw *PassthroughMultiWriter) Status() (content.Status, error) {
+	return content.Status{
+		StartedAt: pmw.startedAt,
+		UpdatedAt: pmw.updatedAt,
+		Total:     pmw.size,
+	}, nil
+}
+
+// Truncate updates the size of the target blob, but cannot do anything with a multiwriter
+func (pmw *PassthroughMultiWriter) Truncate(size int64) error {
+	return errors.New("truncate unavailable on multiwriter")
+}
diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/readerat.go b/registry-library/vendor/oras.land/oras-go/pkg/content/readerat.go
new file mode 100644
index 00000000..41e892ee
--- /dev/null
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/readerat.go
@@ -0,0 +1,49 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"io"
+
+	"github.com/containerd/containerd/content"
+)
+
+// ensure interface
+var (
+	_ content.ReaderAt = sizeReaderAt{}
+)
+
+type readAtCloser interface {
+	io.ReaderAt
+	io.Closer
+}
+
+type sizeReaderAt struct {
+	readAtCloser
+	size int64
+}
+
+func (ra sizeReaderAt) Size() int64 {
+	return ra.size
+}
+
+type nopCloser struct {
+	io.ReaderAt
+}
+
+func (nopCloser) Close() error {
+	return nil
+}
diff --git a/registry-library/vendor/oras.land/oras-go/pkg/content/untar.go b/registry-library/vendor/oras.land/oras-go/pkg/content/untar.go
new file mode 100644
index 00000000..fe95a81a
--- /dev/null
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/untar.go
@@ -0,0 +1,164 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+
+	"github.com/containerd/containerd/content"
+)
+
+// NewUntarWriter wraps a writer with an untar, so that the stream is untarred
+//
+// By default, it calculates the hash when writing. If an expected hash is provided
+// via `WithInputHash` or `WithOutputHash`, it will skip doing that hash. Skipping the
+// hash is intended only for when you are confident about the validity of the data
+// being passed to the writer, and wish to save on the hashing time.
+func NewUntarWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+
+	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
+		tr := tar.NewReader(r)
+		var err error
+		for {
+			_, err = tr.Next()
+			if err == io.EOF {
+				// clear the error, since we do not pass an io.EOF
+				err = nil
+				break // End of archive
+			}
+			if err != nil {
+				// pass the error on
+				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
+				break
+			}
+			// write out the untarred data
+			// we can handle io.EOF, just go to the next file
+			// any other errors should stop and get reported
+			b := make([]byte, wOpts.Blocksize)
+			for {
+				var n int
+				n, err = tr.Read(b)
+				if err != nil && err != io.EOF {
+					err = fmt.Errorf("UntarWriter file data read error: %v", err)
+					break
+				}
+				l := n
+				if n > len(b) {
+					l = len(b)
+				}
+				if _, err2 := w.Write(b[:l]); err2 != nil {
+					err = fmt.Errorf("UntarWriter error writing to underlying writer: %v", err2)
+					break
+				}
+				if err == io.EOF {
+					// go to the next file
+					break
+				}
+			}
+			// did we break with a non-nil and non-EOF error?
+			if err != nil && err != io.EOF {
+				break
+			}
+		}
+		done <- err
+	}, opts...)
+}
+
+// NewUntarWriterByName wraps multiple writers with an untar, so that the stream is untarred and passed
+// to the appropriate writer, based on the filename. If a filename is not found, it is up to the called func
+// to determine how to process it.
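+//
+// An illustrative writers func (w is a hypothetical content.Writer, not
+// defined in this package) that routes every file to the same writer:
+//
+//	writers := func(name string) (content.Writer, error) { return w, nil }
+//	uw := NewUntarWriterByName(writers)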
+func NewUntarWriterByName(writers func(string) (content.Writer, error), opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+
+	// need a PassthroughMultiWriter here
+	return NewPassthroughMultiWriter(writers, func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error) {
+		tr := tar.NewReader(r)
+		var err error
+		var header *tar.Header
+		for {
+			header, err = tr.Next()
+			if err == io.EOF {
+				// clear the error, since we do not pass an io.EOF
+				err = nil
+				break // End of archive
+			}
+			if err != nil {
+				// pass the error on
+				err = fmt.Errorf("UntarWriter tar file header read error: %v", err)
+				break
+			}
+			// get the filename
+			filename := header.Name
+
+			// get the writer for this filename
+			w := getwriter(filename)
+			if w == nil {
+				continue
+			}
+
+			// write out the untarred data
+			// we can handle io.EOF, just go to the next file
+			// any other errors should stop and get reported
+			b := make([]byte, wOpts.Blocksize)
+			for {
+				var n int
+				n, err = tr.Read(b)
+				if err != nil && err != io.EOF {
+					err = fmt.Errorf("UntarWriter file data read error: %v", err)
+					break
+				}
+				l := n
+				if n > len(b) {
+					l = len(b)
+				}
+				if _, err2 := w.Write(b[:l]); err2 != nil {
+					err = fmt.Errorf("UntarWriter error writing to underlying writer for name '%s': %v", filename, err2)
+					break
+				}
+				if err == io.EOF {
+					// go to the next file
+					break
+				}
+			}
+			// did we break with a non-nil and non-EOF error?
+			if err != nil && err != io.EOF {
+				break
+			}
+		}
+		done <- err
+	}, opts...)
+}
diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/content/utils.go b/registry-library/vendor/oras.land/oras-go/pkg/content/utils.go
similarity index 68%
rename from registry-library/vendor/github.com/deislabs/oras/pkg/content/utils.go
rename to registry-library/vendor/oras.land/oras-go/pkg/content/utils.go
index d4095d1d..c31c54aa 100644
--- a/registry-library/vendor/github.com/deislabs/oras/pkg/content/utils.go
+++ b/registry-library/vendor/oras.land/oras-go/pkg/content/utils.go
@@ -1,3 +1,18 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + package content import ( @@ -101,15 +116,24 @@ func extractTarDirectory(root, prefix string, r io.Reader) error { // Name check name := header.Name - path, err := filepath.Rel(prefix, name) + path, err := ensureBasePath(root, prefix, name) if err != nil { return err } - if strings.HasPrefix(path, "../") { - return fmt.Errorf("%q does not have prefix %q", name, prefix) - } path = filepath.Join(root, path) + // Link check + switch header.Typeflag { + case tar.TypeLink, tar.TypeSymlink: + link := header.Linkname + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(name), link) + } + if _, err := ensureBasePath(root, prefix, link); err != nil { + return err + } + } + // Create content switch header.Typeflag { case tar.TypeReg: @@ -132,6 +156,34 @@ func extractTarDirectory(root, prefix string, r io.Reader) error { } } +// ensureBasePath ensures the target path is in the base path, +// returning its relative path to the base path. +func ensureBasePath(root, base, target string) (string, error) { + path, err := filepath.Rel(base, target) + if err != nil { + return "", err + } + cleanPath := filepath.ToSlash(filepath.Clean(path)) + if cleanPath == ".." || strings.HasPrefix(cleanPath, "../") { + return "", fmt.Errorf("%q is outside of %q", target, base) + } + + // No symbolic link allowed in the relative path + dir := filepath.Dir(path) + for dir != "." { + if info, err := os.Lstat(filepath.Join(root, dir)); err != nil { + if !os.IsNotExist(err) { + return "", err + } + } else if info.Mode()&os.ModeSymlink != 0 { + return "", fmt.Errorf("no symbolic link allowed between %q and %q", base, target) + } + dir = filepath.Dir(dir) + } + + return path, nil +} + func writeFile(path string, r io.Reader, perm os.FileMode) error { file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) if err != nil { diff --git a/registry-library/vendor/oras.land/oras-go/pkg/context/context.go b/registry-library/vendor/oras.land/oras-go/pkg/context/context.go new file mode 100644 index 00000000..af0ac846 --- /dev/null +++ b/registry-library/vendor/oras.land/oras-go/pkg/context/context.go @@ -0,0 +1,24 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package context + +import "context" + +// Background returns a default context with logger discarded. +func Background() context.Context { + ctx := context.Background() + return WithLoggerDiscarded(ctx) +} diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/context/logger.go b/registry-library/vendor/oras.land/oras-go/pkg/context/logger.go similarity index 66% rename from registry-library/vendor/github.com/deislabs/oras/pkg/context/logger.go rename to registry-library/vendor/oras.land/oras-go/pkg/context/logger.go index b83f2779..2fb2e527 100644 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/context/logger.go +++ b/registry-library/vendor/oras.land/oras-go/pkg/context/logger.go @@ -1,3 +1,18 @@ +/* +Copyright The ORAS Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package context import ( diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/errors.go b/registry-library/vendor/oras.land/oras-go/pkg/oras/errors.go similarity index 52% rename from registry-library/vendor/github.com/deislabs/oras/pkg/oras/errors.go rename to registry-library/vendor/oras.land/oras-go/pkg/oras/errors.go index 1812a060..054af764 100644 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/errors.go +++ b/registry-library/vendor/oras.land/oras-go/pkg/oras/errors.go @@ -1,3 +1,18 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package oras import ( @@ -8,7 +23,6 @@ import ( // Common errors var ( ErrResolverUndefined = errors.New("resolver undefined") - ErrEmptyDescriptors = errors.New("empty descriptors") ) // Path validation related errors diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/pull.go b/registry-library/vendor/oras.land/oras-go/pkg/oras/pull.go similarity index 84% rename from registry-library/vendor/github.com/deislabs/oras/pkg/oras/pull.go rename to registry-library/vendor/oras.land/oras-go/pkg/oras/pull.go index d578e7bd..8fe55091 100644 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/pull.go +++ b/registry-library/vendor/oras.land/oras-go/pkg/oras/pull.go @@ -1,11 +1,24 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package oras import ( "context" "sync" - orascontent "github.com/deislabs/oras/pkg/content" - "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" @@ -13,6 +26,8 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/semaphore" + + orascontent "oras.land/oras-go/pkg/content" ) // Pull pull files from the remote @@ -61,7 +76,7 @@ func fetchContent(ctx context.Context, fetcher remotes.Fetcher, desc ocispec.Des store := opts.contentProvideIngester if store == nil { - store = newHybridStoreFromIngester(ingester) + store = newHybridStoreFromIngester(ingester, opts.cachedMediaTypes) } handlers := []images.Handler{ filterHandler(opts, opts.allowedMediaTypes...), diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/pull_opts.go b/registry-library/vendor/oras.land/oras-go/pkg/oras/pull_opts.go similarity index 52% rename from registry-library/vendor/github.com/deislabs/oras/pkg/oras/pull_opts.go rename to registry-library/vendor/oras.land/oras-go/pkg/oras/pull_opts.go index 2e604fab..907c27cf 100644 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/pull_opts.go +++ b/registry-library/vendor/oras.land/oras-go/pkg/oras/pull_opts.go @@ -1,13 +1,32 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package oras import ( "context" - - orascontent "github.com/deislabs/oras/pkg/content" + "fmt" + "io" + "sync" "github.com/containerd/containerd/images" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sync/semaphore" + + orascontent "oras.land/oras-go/pkg/content" ) type pullOpts struct { @@ -17,6 +36,7 @@ type pullOpts struct { callbackHandlers []images.Handler contentProvideIngester orascontent.ProvideIngester filterName func(ocispec.Descriptor) bool + cachedMediaTypes []string } // PullOpt allows callers to set options on the oras pull @@ -24,8 +44,26 @@ type PullOpt func(o *pullOpts) error func pullOptsDefaults() *pullOpts { return &pullOpts{ - dispatch: images.Dispatch, - filterName: filterName, + dispatch: images.Dispatch, + filterName: filterName, + cachedMediaTypes: []string{ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex}, + } +} + +// WithCachedMediaTypes sets the media types normally cached in memory when pulling. +func WithCachedMediaTypes(cachedMediaTypes ...string) PullOpt { + return func(o *pullOpts) error { + o.cachedMediaTypes = cachedMediaTypes + return nil + } +} + +// WithAdditionalCachedMediaTypes adds media types normally cached in memory when pulling. +// This does not replace the default media types, but appends to them +func WithAdditionalCachedMediaTypes(cachedMediaTypes ...string) PullOpt { + return func(o *pullOpts) error { + o.cachedMediaTypes = append(o.cachedMediaTypes, cachedMediaTypes...) 
+ return nil } } @@ -87,3 +125,26 @@ func WithPullEmptyNameAllowed() PullOpt { return nil } } + +// WithPullStatusTrack report results to stdout +func WithPullStatusTrack(writer io.Writer) PullOpt { + return WithPullCallbackHandler(pullStatusTrack(writer)) +} + +func pullStatusTrack(writer io.Writer) images.Handler { + var printLock sync.Mutex + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if name, ok := orascontent.ResolveName(desc); ok { + digestString := desc.Digest.String() + if err := desc.Digest.Validate(); err == nil { + if algo := desc.Digest.Algorithm(); algo == digest.SHA256 { + digestString = desc.Digest.Encoded()[:12] + } + } + printLock.Lock() + defer printLock.Unlock() + fmt.Fprintln(writer, "Downloaded", digestString, name) + } + return nil, nil + }) +} diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/push.go b/registry-library/vendor/oras.land/oras-go/pkg/oras/push.go similarity index 76% rename from registry-library/vendor/github.com/deislabs/oras/pkg/oras/push.go rename to registry-library/vendor/oras.land/oras-go/pkg/oras/push.go index be773bed..93542f5c 100644 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/push.go +++ b/registry-library/vendor/oras.land/oras-go/pkg/oras/push.go @@ -1,3 +1,18 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package oras import ( @@ -7,10 +22,11 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/containerd/remotes" - artifact "github.com/deislabs/oras/pkg/artifact" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + artifact "oras.land/oras-go/pkg/artifact" ) // Push pushes files to the remote @@ -18,9 +34,6 @@ func Push(ctx context.Context, resolver remotes.Resolver, ref string, provider c if resolver == nil { return ocispec.Descriptor{}, ErrResolverUndefined } - if len(descriptors) == 0 { - return ocispec.Descriptor{}, ErrEmptyDescriptors - } opt := pushOptsDefaults() for _, o := range opts { if err := o(opt); err != nil { @@ -52,7 +65,7 @@ func Push(ctx context.Context, resolver remotes.Resolver, ref string, provider c } } - if err := remotes.PushContent(ctx, pusher, desc, store, nil, wrapper); err != nil { + if err := remotes.PushContent(ctx, pusher, desc, store, nil, nil, wrapper); err != nil { return ocispec.Descriptor{}, err } return desc, nil @@ -60,7 +73,7 @@ func Push(ctx context.Context, resolver remotes.Resolver, ref string, provider c //func pack(store *hybridStore, descriptors []ocispec.Descriptor, opts *pushOpts) (ocispec.Descriptor, error) { func pack(provider content.Provider, descriptors []ocispec.Descriptor, opts *pushOpts) (ocispec.Descriptor, content.Store, error) { - store := newHybridStoreFromProvider(provider) + store := newHybridStoreFromProvider(provider, nil) // Config var config ocispec.Descriptor @@ -87,6 +100,9 @@ func pack(provider content.Provider, descriptors []ocispec.Descriptor, opts *pus return *opts.manifest, store, nil } + if descriptors == nil { + descriptors = []ocispec.Descriptor{} // make it an empty array to prevent potential server-side bugs + } manifest := ocispec.Manifest{ Versioned: specs.Versioned{ SchemaVersion: 2, // historical value. does not pertain to OCI or docker version diff --git a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/push_opts.go b/registry-library/vendor/oras.land/oras-go/pkg/oras/push_opts.go similarity index 74% rename from registry-library/vendor/github.com/deislabs/oras/pkg/oras/push_opts.go rename to registry-library/vendor/oras.land/oras-go/pkg/oras/push_opts.go index 09062054..1ae7b638 100644 --- a/registry-library/vendor/github.com/deislabs/oras/pkg/oras/push_opts.go +++ b/registry-library/vendor/oras.land/oras-go/pkg/oras/push_opts.go @@ -1,14 +1,33 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package oras import ( + "context" + "fmt" + "io" "path/filepath" "strings" + "sync" "github.com/containerd/containerd/images" - orascontent "github.com/deislabs/oras/pkg/content" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + + orascontent "oras.land/oras-go/pkg/content" ) type pushOpts struct { @@ -127,3 +146,20 @@ func WithPushBaseHandler(handlers ...images.Handler) PushOpt { return nil } } + +// WithPushStatusTrack report results to a provided writer +func WithPushStatusTrack(writer io.Writer) PushOpt { + return WithPushBaseHandler(pushStatusTrack(writer)) +} + +func pushStatusTrack(writer io.Writer) images.Handler { + var printLock sync.Mutex + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if name, ok := orascontent.ResolveName(desc); ok { + printLock.Lock() + defer printLock.Unlock() + fmt.Fprintln(writer, "Uploading", desc.Digest.Encoded()[:12], name) + } + return nil, nil + }) +} diff --git a/registry-library/vendor/oras.land/oras-go/pkg/oras/store.go b/registry-library/vendor/oras.land/oras-go/pkg/oras/store.go new file mode 100644 index 00000000..33d87345 --- /dev/null +++ b/registry-library/vendor/oras.land/oras-go/pkg/oras/store.go @@ -0,0 +1,239 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package oras
+
+import (
+	"context"
+	"errors"
+	"io"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sync/errgroup"
+
+	orascontent "oras.land/oras-go/pkg/content"
+)
+
+// ensure interface
+var (
+	_ content.Store = &hybridStore{}
+)
+
+type hybridStore struct {
+	cache            *orascontent.Memorystore
+	cachedMediaTypes []string
+	provider         content.Provider
+	ingester         content.Ingester
+}
+
+func newHybridStoreFromProvider(provider content.Provider, cachedMediaTypes []string) *hybridStore {
+	return &hybridStore{
+		cache:            orascontent.NewMemoryStore(),
+		cachedMediaTypes: cachedMediaTypes,
+		provider:         provider,
+	}
+}
+
+func newHybridStoreFromIngester(ingester content.Ingester, cachedMediaTypes []string) *hybridStore {
+	return &hybridStore{
+		cache:            orascontent.NewMemoryStore(),
+		cachedMediaTypes: cachedMediaTypes,
+		ingester:         ingester,
+	}
+}
+
+func (s *hybridStore) Set(desc ocispec.Descriptor, content []byte) {
+	s.cache.Set(desc, content)
+}
+
+// ReaderAt provides contents
+func (s *hybridStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+	readerAt, err := s.cache.ReaderAt(ctx, desc)
+	if err == nil {
+		return readerAt, nil
+	}
+	if s.provider != nil {
+		return s.provider.ReaderAt(ctx, desc)
+	}
+	return nil, err
+}
+
+// Writer begins or resumes the active writer identified by desc
+func (s *hybridStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+	var wOpts content.WriterOpts
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil, err
+		}
+	}
+
+	if isAllowedMediaType(wOpts.Desc.MediaType, s.cachedMediaTypes...) || s.ingester == nil {
+		cacheWriter, err := s.cache.Writer(ctx, opts...)
+		if err != nil || s.ingester == nil { // without an ingester, the cache is the only target
+			return cacheWriter, err
+		}
+		ingesterWriter, err := s.ingester.Writer(ctx, opts...)
+		if err != nil {
+			return nil, err
+		}
+		return newTeeWriter(wOpts.Desc, cacheWriter, ingesterWriter), nil
+	}
+	return s.ingester.Writer(ctx, opts...)
+}
+
+// TODO: implement (needed to create a content.Store)
+// TODO: do not return empty content.Info
+// Info returns the content.Info for the given digest, if known to the store.
+func (s *hybridStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+	return content.Info{}, nil
+}
+
+// TODO: implement (needed to create a content.Store)
+// Update updates mutable information related to content.
+// If one or more fieldpaths are provided, only those
+// fields will be updated.
+// Mutable fields:
+// labels.*
+func (s *hybridStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	return content.Info{}, errors.New("not yet implemented: Update (content.Store interface)")
+}
+
+// TODO: implement (needed to create a content.Store)
+// Walk will call fn for each item in the content store which
+// match the provided filters. If no filters are given all
+// items will be walked.
+func (s *hybridStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
+	return errors.New("not yet implemented: Walk (content.Store interface)")
+}
+
+// TODO: implement (needed to create a content.Store)
+// Delete removes the content from the store.
+func (s *hybridStore) Delete(ctx context.Context, dgst digest.Digest) error { + return errors.New("not yet implemented: Delete (content.Store interface)") +} + +// TODO: implement (needed to create a content.Store) +func (s *hybridStore) Status(ctx context.Context, ref string) (content.Status, error) { + // Status returns the status of the provided ref. + return content.Status{}, errors.New("not yet implemented: Status (content.Store interface)") +} + +// TODO: implement (needed to create a content.Store) +// ListStatuses returns the status of any active ingestions whose ref match the +// provided regular expression. If empty, all active ingestions will be +// returned. +func (s *hybridStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { + return []content.Status{}, errors.New("not yet implemented: ListStatuses (content.Store interface)") +} + +// TODO: implement (needed to create a content.Store) +// Abort completely cancels the ingest operation targeted by ref. +func (s *hybridStore) Abort(ctx context.Context, ref string) error { + return errors.New("not yet implemented: Abort (content.Store interface)") +} + +// teeWriter tees the content to one or more content.Writer +type teeWriter struct { + writers []content.Writer + digester digest.Digester + status content.Status +} + +func newTeeWriter(desc ocispec.Descriptor, writers ...content.Writer) *teeWriter { + now := time.Now() + return &teeWriter{ + writers: writers, + digester: digest.Canonical.Digester(), + status: content.Status{ + Total: desc.Size, + StartedAt: now, + UpdatedAt: now, + }, + } +} + +func (t *teeWriter) Close() error { + g := new(errgroup.Group) + for _, w := range t.writers { + w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + return w.Close() + }) + } + return g.Wait() +} + +func (t *teeWriter) Write(p []byte) (n int, err error) { + g := new(errgroup.Group) + for _, w := range t.writers { + w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + n, err := w.Write(p[:]) + if err != nil { + return err + } + if n != len(p) { + return io.ErrShortWrite + } + return nil + }) + } + err = g.Wait() + n = len(p) + if err != nil { + return n, err + } + _, _ = t.digester.Hash().Write(p[:n]) + t.status.Offset += int64(len(p)) + t.status.UpdatedAt = time.Now() + + return n, nil +} + +// Digest may return empty digest or panics until committed. +func (t *teeWriter) Digest() digest.Digest { + return t.digester.Digest() +} + +func (t *teeWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + g := new(errgroup.Group) + for _, w := range t.writers { + w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + return w.Commit(ctx, size, expected, opts...) + }) + } + return g.Wait() +} + +// Status returns the current state of write +func (t *teeWriter) Status() (content.Status, error) { + return t.status, nil +} + +// Truncate updates the size of the target blob +func (t *teeWriter) Truncate(size int64) error { + g := new(errgroup.Group) + for _, w := range t.writers { + w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + return w.Truncate(size) + }) + } + return g.Wait() +}
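
As a sanity check on the vendored writer plumbing above, the sketch below shows one way the new pkg/content helpers compose: NewUntarWriterByName hands each tar entry to a per-name writer, and NewIoContentWriter adapts a plain io.Writer into the content.Writer it expects. This is a minimal, untested sketch against the vendored API, not part of the patch; the tarball variable is a hypothetical stand-in for real archive bytes.

package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	orascontent "oras.land/oras-go/pkg/content"
)

// tarball is a hypothetical tar archive; in real use it would come from a
// registry pull or a file on disk.
var tarball []byte

func main() {
	// collect one buffer per file name seen in the tar stream
	buffers := map[string]*bytes.Buffer{}
	writers := func(name string) (content.Writer, error) {
		buf := &bytes.Buffer{}
		buffers[name] = buf
		// NewIoContentWriter adapts the io.Writer into a content.Writer
		return orascontent.NewIoContentWriter(buf), nil
	}

	w := orascontent.NewUntarWriterByName(writers)
	if _, err := w.Write(tarball); err != nil {
		panic(err)
	}
	// Commit closes the pipe and waits for the untar goroutine to finish
	if err := w.Commit(context.Background(), 0, w.Digest()); err != nil {
		panic(err)
	}
	for name, buf := range buffers {
		fmt.Println(name, buf.Len())
	}
}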