From 3161e2512b069613c04e110d4ef1eab0822a3b01 Mon Sep 17 00:00:00 2001 From: Alex Couture-Beil Date: Thu, 28 Oct 2021 12:51:02 -0700 Subject: [PATCH] Acb/earthly main (#54) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * integration: add common context base to all integration tests Signed-off-by: Tonis Tiigi * vendor: update opentelemetry to 1.0.0-rc Signed-off-by: Tonis Tiigi * add current tracing context detection and exec propagation Signed-off-by: Tonis Tiigi (cherry picked from commit bc9a83144c83e9fd78007b7bfe92e8082c59d40e) * add transform package to convert from otlp Signed-off-by: Tonis Tiigi * tracing: add delegated exporter Signed-off-by: Tonis Tiigi * otlgrpc: provide a way to get otlp client from grpc conn Hopefully this can be removed with a future upstream change that could make this configurable. The package also needs internal dependency that is copied in. Signed-off-by: Tonis Tiigi * enable collecting traces via control api Signed-off-by: Tonis Tiigi * support collecting traces from llb.Exec Signed-off-by: Tonis Tiigi * client: pass delegated exporter as parameter Avoid client package having dependency on global detect package. 
Signed-off-by: Tonis Tiigi * update runc binary to v1.0.0 GA Signed-off-by: Akihiro Suda * handle unconfigured spans without errors Signed-off-by: Tonis Tiigi * llb: add constraints to vertex and validate Signed-off-by: Tonis Tiigi * llb: add constraints to async llb Signed-off-by: Tonis Tiigi * llb: ensure meta resolver uses platform form constraints Signed-off-by: Tonis Tiigi * flightcontrol: reduce contention between goroutines Signed-off-by: Tonis Tiigi * Avoid nil pointer dereference when copying from image with no layers Fix this panic when copying from an image with no layers: ``` panic: runtime error: invalid memory address or nil pointer dereference [signal SIGSEGV: segmentation violation code=0x1 addr=0x50 pc=0xdd8c17] goroutine 326 [running]: github.com/moby/buildkit/cache/contenthash.(*cacheManager).Checksum(0xc0005ec030, 0x1682c00, 0xc000842140, 0x0, 0x0, 0xc0005d4023, 0x1, 0x0, 0x0, 0x0, ...) /src/cache/contenthash/checksum.go:95 +0x37 github.com/moby/buildkit/cache/contenthash.Checksum(0x1682c00, 0xc000842140, 0x0, 0x0, 0xc0005d4023, 0x1, 0x0, 0x0, 0x0, 0x0, ...) /src/cache/contenthash/checksum.go:59 +0xd5 github.com/moby/buildkit/solver/llbsolver.NewContentHashFunc.func1.1(0x0, 0x4425d6) /src/solver/llbsolver/result.go:59 +0x20a golang.org/x/sync/errgroup.(*Group).Go.func1(0xc00056a360, 0xc000594510) /src/vendor/golang.org/x/sync/errgroup/errgroup.go:57 +0x59 created by golang.org/x/sync/errgroup.(*Group).Go /src/vendor/golang.org/x/sync/errgroup/errgroup.go:54 +0x66 ``` When the path is "/", we allow it because it's a noop. 
Based on https://github.com/moby/buildkit/pull/2185 Signed-off-by: Aaron Lehmann * Add test for copying from scratch Signed-off-by: Aaron Lehmann * Check that scratch is mounted as empty dir Signed-off-by: Aaron Lehmann * Make error message consistent when layer is empty Signed-off-by: Aaron Lehmann * Test with tonistiigi/test:nolayers as well Signed-off-by: Aaron Lehmann * ensure containerd io is complete and closed before returning Signed-off-by: Cory Bennett * [#2112] progress.Controller should own the progress.Writer to prevent leaks Signed-off-by: Cory Bennett * [#2112] progress.FromContext returns a writer factory this allows progress.Controller to manage the writer lifecycle Signed-off-by: Cory Bennett * contenthash: use SeekLowerBound to seek radix tree Signed-off-by: Tonis Tiigi * dockerfile: fix git version detection Signed-off-by: Tonis Tiigi * Add support for heredocs with ONBUILD Signed-off-by: Justin Chadwell * dockerfile: use none differ for dockerfile/dockerignore This avoids wrong metadata matches on small files Signed-off-by: Tonis Tiigi * progressui: print logs for failed step as summary in plain mode Signed-off-by: Tonis Tiigi * grpcerrors: avoid rpc error wrapping in error messages Signed-off-by: Tonis Tiigi * exec: improve error message on exec errors Signed-off-by: Tonis Tiigi * Improve heredoc parsing to allow more generic words Previously, heredoc names were restricted to simple alphanumeric strings. However, heredocs should support much more complex use-cases, including quoting anywhere, as well as allowing special symbols like `.` for easily expressing file extensions. This patch adds support for these more complex cases, by using the shell lexer to parse each heredoc name. Additionally, we include improvements to the lexer to optionally preserve escape tokens to avoid problems when lexing words that have already been lexed before. 
Signed-off-by: Justin Chadwell * Improve progress and history messages for heredoc-related commands Signed-off-by: Justin Chadwell * Remove unneeded Finalize method from ImmutableRef. Finalize was only used outside the cache package in one place, which called it with the commit arg set to false. The code path followed when commit==false turned out to essentially be a no-op because it set "retain cache" to true if it was already set to true. It was thus safe to remove the only external call to it and remove it from the interface. This should be helpful for future efforts to simplify the equal{Mutable,Immutable} fields in cacheRecord, which exist due to the "lazy commit" feature that Finalize is tied into. Signed-off-by: Erik Sipsma * Fix ref leak if fileop ref fails to mount. Signed-off-by: Erik Sipsma * add error suggest pkg Signed-off-by: Tonis Tiigi * dockerfile: suggest mistyped flag names Signed-off-by: Tonis Tiigi * dockerfile: provide suggestions for mount options Signed-off-by: Tonis Tiigi * dockerfile: add tests for error suggestions Signed-off-by: Tonis Tiigi * dockerfile: remove unnecessary error wrappings Signed-off-by: Tonis Tiigi * enable riscv64 build Signed-off-by: Tonis Tiigi * Update QEMU emulators Signed-off-by: CrazyMax * dockerfile: move run network to stable channel Signed-off-by: Tonis Tiigi * Automatically detect default git branch Instead of just assuming that the default branch is master, use ls-remote to find out. Also removed tests that didn't specify a branch but required authentication, because those will fail now that the repo is actually checked. 
Signed-off-by: Levi Harrison * Moved getDefaultBranch function to gitsource It is my suspicion that the tests were failing on previous commits because of the lack of authentication and other stuff like that available in gitidentifier as compared to gitsource Signed-off-by: Levi Harrison * Fix tests Unfortunately, further test cases will have to be removed because gitidentifier will now leave the branch blank instead of filling it in Signed-off-by: Levi Harrison * git: fix default branch detection Signed-off-by: Tonis Tiigi * Enable to forcefully specify compression type Signed-off-by: ktock * Add full timestamp to logs Signed-off-by: Yamazaki Masashi * Remove meaningless encode Signed-off-by: Yamazaki Masashi * Ignore missing providers for blobs w/ same chainid. GetByBlob checks to see if there are any other blobs with the same (uncompressed) ChainID and, if so, reuses their unpacked snapshot if it exists. The problem is if this code finds a match, it was trying to get the matching record, but couldn't do so when the match is lazy because the caller doesn't necessarily have descriptor handlers setup for it. This commit changes the behavior to just ignore any match with the same ChainID that's also lazy as they just aren't usable for the snapshot-reuse optimization. Signed-off-by: Erik Sipsma * authprovider: handle eaccess on storing token seeds Signed-off-by: Tonis Tiigi * log with traceID and spanID Signed-off-by: Morlay * github: update CI buildkit to v0.9.0-rc1 Signed-off-by: Tonis Tiigi * initial version of github cache Signed-off-by: Tonis Tiigi * vendor: add goactionscache Signed-off-by: Tonis Tiigi * caps: add cap for gha cache backend Signed-off-by: Tonis Tiigi * remove tracetransform package Signed-off-by: Tonis Tiigi * resolver: increase default idle conns reuse The current defaults were even lower than stdlib defaults. 
Signed-off-by: Tonis Tiigi * refactor to use util/bklog instead of using logrus directly Signed-off-by: Morlay * GitHub Actions cache docs Signed-off-by: CrazyMax * Skips getting UID/GUID if passwd/group file is not found When running a WORKDIR instruction, buildkit will create that folder and chown it to the currently set user. For this, it will try to read the /etc/passwd file to get the proper UID, and if that user is not found in the file, the root user will be considered as the owner. However, Windows images do not have that file, which will result in an error while building the image. We can consider not finding the /etc/passwd file as the same as not finding the user in the file, which would solve this issue. Signed-off-by: Claudiu Belu * add per domain semaphore to limit concurrent connections This is a safer alternative until we figure out why http.Transport based limiting fails. Some connections like cache export/import do not have a domain key atm and these connections use global pool. Signed-off-by: Tonis Tiigi * update to github.com/containerd/containerd v1.5.3 Signed-off-by: coryb * vendor: update go-actions-cache with custom client support Signed-off-by: Tonis Tiigi * tracing: update to otelhttp roundtripper Signed-off-by: Tonis Tiigi * Enhance test matrix Signed-off-by: CrazyMax * fix dropped pull progress output due to canceled context fixes #2248 Signed-off-by: coryb * Add span for layer export This can be a significant amount of time that isn't currently accounted for in traces. Signed-off-by: Aaron Lehmann * new implementation for limiting tcp connections The previous implementation had many issues. Eg. on fetch, even if the data already existed and no remote connections were needed the request would still be waiting in the queue. Or if two fetches of same blob happened together they would take up two places in queue although there was only one remote request. 
Signed-off-by: Tonis Tiigi * limited: allow extra high-priority connection for json requests Signed-off-by: Tonis Tiigi * ensure wrappers support seeking to continue partial downloads Signed-off-by: Tonis Tiigi * contentutil: change offset to int64 to simplify Signed-off-by: Tonis Tiigi * Exporter config digest typo Signed-off-by: CrazyMax * daemonless: wait for daemon to finish before exit Signed-off-by: Tonis Tiigi * github: update CI buildkit to v0.9.0 Signed-off-by: Tonis Tiigi * add docs for new config options Signed-off-by: Tonis Tiigi * add ktock and crazy-max to maintainers Signed-off-by: Tonis Tiigi * Update Dockerfile references to use 1.3 Signed-off-by: Tonis Tiigi * docs: update images-readme to v0.9 Signed-off-by: Tonis Tiigi * Bump to codecov/codecov-action v2 Signed-off-by: CrazyMax * build(deps): bump github.com/containerd/containerd from 1.5.3 to 1.5.4 Bumps [github.com/containerd/containerd](https://github.com/containerd/containerd) from 1.5.3 to 1.5.4. - [Release notes](https://github.com/containerd/containerd/releases) - [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md) - [Commits](https://github.com/containerd/containerd/compare/v1.5.3...v1.5.4) --- updated-dependencies: - dependency-name: github.com/containerd/containerd dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * util/tracing: remove incorrect import enforcing comment This import comment caused compilation of buildx to fail if `GO111MODULE` was set to `off`: Without `GO111MODULE` set (but with `-mod=vendor`: echo $GO111MODULE export PKG=github.com/docker/buildx export LDFLAGS="-X ${PKG}/version.Version=$(git describe --match 'v[0-9]*' --always --tags) -X ${PKG}/version.Revision=$(git rev-parse HEAD) -X ${PKG}/version.Package=${PKG}" GOFLAGS=-mod=vendor go build -o bin/docker-buildx -ldflags "${LDFLAGS}" ./cmd/buildx bin/docker-buildx version github.com/docker/buildx v0.6.0 d9ee3b134cbc2d09513fa7fee4176a3919e05887 When setting `GO111MODULE=off`, it fails on the incorrect import path in the vendored file (looks like GO111MODULE=on ignores import-path comments?): export GO111MODULE=off root@5a55ec1c1eed:/go/src/github.com/docker/buildx# GOFLAGS=-mod=vendor go build -o bin/docker-buildx -ldflags "${LDFLAGS}" ./cmd/buildx vendor/github.com/moby/buildkit/client/client.go:20:2: code in directory /go/src/github.com/docker/buildx/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc expects import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/connection/connection.go:33:2: found import comments "go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig" (options.go) and "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" (optiontypes.go) in /go/src/github.com/docker/buildx/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig Signed-off-by: Sebastiaan van Stijn * Fix protoc link Signed-off-by: CrazyMax * Allow ExitError type to be transmitted over GRPC This will allow clients to retrieve exit error codes returned during a solve without parsing the error messages. 
Signed-off-by: Aaron Lehmann * Update to github.com/opencontainers/runc v1.0.1 Signed-off-by: CrazyMax * Split cache options doc for each exporter Signed-off-by: CrazyMax * Set default socket permissions to 660 The systemd default is 666, it seems. Signed-off-by: Anders F Björklund * fix SecurityMode being dropped on gateway container Start Signed-off-by: Cory Bennett * bump containerd from 1.5.4 to 1.5.5 Signed-off-by: CrazyMax * go.mod: golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c In preparation of replacing the deprecated github.com/docker/docker/pkg/signal, which uses this version (updating it separately for easier review). Signed-off-by: Sebastiaan van Stijn * replace use of deprecated github.com/docker/docker/pkg/signal This package was moved to a separate module in github.com/moby/sys/signal Signed-off-by: Sebastiaan van Stijn * Additional tests and cleanup for cache/contenthash This adds a little extra testing around ** patterns, and adds a (currently skipped) test for copying directories under symlinks (#2300). It removes an extra call to `filepath.FromSlash` in `shouldIncludePath` and an unused argument to that function. Signed-off-by: Aaron Lehmann * all: remove duplicate imports Signed-off-by: Koichi Shiraishi * all: unify the specs-go package import alias to ocispecs ocispecs means "O"pen "C"ontainer "I"nitiative image-spec/"specs"-go/v1 opencontainers /image-spec/specs-go/v1 Signed-off-by: Koichi Shiraishi * hack/dockerfiles: upgrade golangci-lint version to v1.41.1 Signed-off-by: Koichi Shiraishi * golangci-lint: enable importas and add settings for specs-go package Signed-off-by: Koichi Shiraishi * all: unify the go-digest package import alias to digest Signed-off-by: Koichi Shiraishi * golangci-lint: add go-digest importas setting Signed-off-by: Koichi Shiraishi * Fix IncludePattern/ExcludePattern matching The transformation to rootedPatterns seems very wrong and inconsistent with what the copy logic did. 
Change it to match the copy logic, and add more testing. Signed-off-by: Aaron Lehmann * dockerfile: fix parsing required key without value Signed-off-by: Tonis Tiigi * generated files: use "go install" to install binaries Now that this repository moved to go1.16, we can use 'go install' to install these binaries. Signed-off-by: Sebastiaan van Stijn * util/stack: update protoc options to work with newer versions Generating the util/stack protos failed when updating protoc-gen-go to v1.5.2; it looks like this is the only proto that's not generated using protoc-gen-gogo): util/stack/generate.go protoc-gen-go: unable to determine Go import path for "stack.proto" Please specify either: • a "go_package" option in the .proto source file, or • a "M" argument on the command line. See https://developers.google.com/protocol-buffers/docs/reference/go-generated#package for more information. --go_out: protoc-gen-go: Plugin failed with status code 1. util/stack/generate.go:3: running "protoc": exit status 1 Newer protobuf versions expect a go package to be set. Other .proto files in this repository use the bare package name, but with protoc-gen-go v1.5.2, this produces an error (package names must at least have a "/"). In addition to including the option to the .proto file also changes the generated result (`options go_package ""`). Using the `-go_opt=M` option on the other hand, didn't change the result (while still on protoc-gen-go v1.3.5), so I used that option instead. protoc-gen-go v1.5.2 also changed the behavior where the generated file is stored, seemingly relative to the `../../vendor` path specified. This could be fixed either by setting `--go_out=../../`, which was a bit counter-intuitive, or setting the `--go_opt=paths=source_relative` option. The latter also prevented v1.5.2 from storing the file in `utils/stack/github.com/moby/buildkit/utils/stack/` (sigh). 
Signed-off-by: Sebastiaan van Stijn * add missing ExtraHosts to gateway exec Also adding tests for ExtraHosts and NetMode via gateway exec Signed-off-by: Cory Bennett * add gateway.exec.extrahosts capability Signed-off-by: Cory Bennett * cache: Fix flightcontrol use in computeBlobChain. Previously, the flightcontrol group was being given a key just set to the ref's ID, which meant that concurrent calls using different values of compressionType, createIfNeeded and forceCompression would incorrectly be de-duplicated. The change here splits up the flightcontrol group into a few separate calls and ensures that all the correct input variables are put into the flightcontrol keys. Signed-off-by: Erik Sipsma * solver: include cachemap index in flightcontrol. Signed-off-by: Erik Sipsma * pull: use resolvemode in flightcontrol key. Signed-off-by: Erik Sipsma * util: remove outdated flightcontrol test assertion. The test was making an assertion that is no longer expected to always be true after #2195, which purposely made flightcontrol less deterministic. This lead to occasional failures. Signed-off-by: Erik Sipsma * update go to 1.17 Signed-off-by: Tonis Tiigi * gomod: update to go1.17 Signed-off-by: Tonis Tiigi * Follow links in includedPaths to resolve incorrect caching when source path is behind symlink As discussed in #2300, includedPaths does not resolve symlinks when looking up the source path in the prefix tree. If the user requests a path that involves symlinks (for example, /a/foo when a symlink /a -> /b exists), includedPaths will not find it, and will expect nothing to be copied. This does not match the actual copy behavior implemented in fsutil, which will follow symlinks in prefix components of a given path, so it can end up caching an empty result even though the copy will produce a non-empty result, which is quite bad. To fix this, use getFollowLinks to resolve the path before walking it. 
In the wildcard case, this is done to the non-wildcard prefix of the path (if any), which matches the behavior in fsutil. Fixes the repro case here: https://gist.github.com/aaronlehmann/64054c9a2cff0d27e200cc107bba3d69 Fixes #2300 Signed-off-by: Aaron Lehmann * cmd/buildkitd: replace BurntSushi/toml with pelletier/go-toml The BurntSushi/toml project has been deprecated, and the ecosystem is converging on using pelletier/go-toml as the "canonical" replacement. Signed-off-by: Sebastiaan van Stijn * control: fix 64bit alignment for buildcount Signed-off-by: Tonis Tiigi * Use fixed fileutils matching functions This is important for two reasons: 1) Keeps caching logic consistent with recent fsutil changes to use these functions (also vendored here). 2) Allows us to move forward with removal of the original buggy Matches implementation in moby/moby. Signed-off-by: Aaron Lehmann * Add `estargz` compression type Signed-off-by: Kohei Tokunaga * Refactor cache metadata interface. There are a few goals with this refactor: 1. Remove external access to fields that no longer make sense and/or won't make sense soon due to other potential changes. For example, there can now be multiple blobs associated with a ref (for different compression types), so the fact that you could access the "Blob" field from the Info method on Ref incorrectly implied there was just a single blob for the ref. This is on top of the fact that there is no need for external access to blob digests. 2. Centralize use of cache metadata inside the cache package. Previously, many parts of the code outside the cache package could obtain the bolt storage item for any ref and read/write it directly. This made it hard to understand what fields are used and when. Now, the Metadata method has been removed from the Ref interface and replaced with getters+setters for metadata fields we want to expose outside the package, which makes it much easier to track and understand. 
Similar changes have been made to the metadata search interface. 3. Use a consistent getter+setter interface for metadata, replacing the mix of interfaces like Metadata(), Size(), Info() and other inconsistencies. Signed-off-by: Erik Sipsma * Use containerd/pkg/seccomp.IsEnabled() This replaces the local SeccompSupported() utility for the implementation in containerd, which performs the same check. Signed-off-by: Sebastiaan van Stijn * Compute diff from the upper dir of overlayfs-based snapshotter Signed-off-by: Kohei Tokunaga * go.mod: github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 full diff: https://github.com/moby/term/compare/bea5bbe245bf...3f7ff695adc6a35abc925370dd0a4dafb48ec64d updates Azure/go-ansiterm to fix integer overflow on arm Signed-off-by: Sebastiaan van Stijn * go.mod: split the indirect packages After go1.17, all indirect packages are listed in the go.mod file. In addition, has been introduced the ability to list indirect packages separately. Split the indirect packages to make the dependency packages clearer. Signed-off-by: Koichi Shiraishi * exporter: support creating blobs with zstd compression Signed-off-by: Tonis Tiigi * update getremote test for zstd Estargz support has been removed from this test as implementation does not guarantee digest stability and only reason it passed were the exceptions in the test via variant map that ignored cases where timing resulted the digest to go wrong. This needs to be addressed in the follow up if we want to keep estargz support. 
Signed-off-by: Tonis Tiigi * Add test case for symlink which is not final path component before wildcard Signed-off-by: Aaron Lehmann * hack: allow mounting in workdir in shell Signed-off-by: Tonis Tiigi * Handle the case of multiple path component symlinks (including last component) in wildcard prefix Signed-off-by: Aaron Lehmann * Use getFollowLinksWalked Signed-off-by: Aaron Lehmann * bklog: only log tracing ids when span exporter not nil Signed-off-by: Morlay * Refactor url redacting util Signed-off-by: CrazyMax * Clean up old TODOs Signed-off-by: Tonis Tiigi * Move config parsing to a dedicated pkg Signed-off-by: CrazyMax * Generate and embed build sources Signed-off-by: CrazyMax * resolver: use different mutex for handlers and hosts hosts mutex is called on initialization, meaning `GetResolver` might block if it is in the middle of auth exchange. This is currently bad in the case where Job initialization needs to register a name before timeout is reached. Signed-off-by: Tonis Tiigi * resolver: make sure authorizer is not overwritten on other resolvers Authorizer stores the current session.Group so if it is overwritten for another resolver it means that session might have been dropped and authentication will fail. Signed-off-by: Tonis Tiigi * solver: increase timeout for job registration Signed-off-by: Tonis Tiigi * go.mod: sort and move self-managed indirect dependencies to first block Signed-off-by: Koichi Shiraishi * Fix issues #1980 and #2198 Signed-off-by: Jonathan Giannuzzi * Add BUILDKIT_SANDBOX_HOSTNAME build-arg Signed-off-by: CrazyMax * Fix estargz compression loses the original tar metadata Currently, eStargz compression doesn't preserve the original tar metadata (header bytes and their order). This causes failure of `TestGetRemote` because an uncompressed blob converted from a gzip blob provides different digest against the one converted from eStargz blob even if their original tar (computed by differ) are the same. 
This commit solves this issue by fixing eStargz to preserve original tar's metadata that is modified by eStargz. Signed-off-by: Kohei Tokunaga * Enhance ANSI color for progress ui Signed-off-by: CrazyMax * Move resolver config to a dedicated package Signed-off-by: CrazyMax * Standard user umask for git process Signed-off-by: CrazyMax * make sure ci runs on version branches Signed-off-by: Tonis Tiigi * return an error instead of panicking when failing to get edge Signed-off-by: Maxime Lagresle * Add support for shm size Signed-off-by: CrazyMax * don't cast Value when pipe is errored Signed-off-by: Maxime Lagresle * Apply Earthly changes to newer buildkit version This commit squashes previous work done in the earthly-main branch 199ad6a into a single commit which is rebased against moby/master branch d429b0b Co-authored-by: Tõnis Tiigi Co-authored-by: Akihiro Suda Co-authored-by: Akihiro Suda Co-authored-by: Aaron Lehmann Co-authored-by: Cory Bennett Co-authored-by: Justin Chadwell Co-authored-by: Erik Sipsma Co-authored-by: CrazyMax Co-authored-by: Levi Harrison Co-authored-by: ktock Co-authored-by: masibw Co-authored-by: Morlay Co-authored-by: CrazyMax <1951866+crazy-max@users.noreply.github.com> Co-authored-by: Claudiu Belu Co-authored-by: Sebastiaan van Stijn Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastiaan van Stijn Co-authored-by: Anders F Björklund Co-authored-by: CrazyMax Co-authored-by: Koichi Shiraishi Co-authored-by: Jonathan Giannuzzi Co-authored-by: Maxime Lagresle --- .github/workflows/build.yml | 156 +- .github/workflows/ci.yml | 28 - Earthfile | 3 +- README.md | 2 +- cache/blobs.go | 11 +- cache/blobs_linux.go | 9 +- cache/converter.go | 5 +- cache/manager.go | 4 +- cache/manager_test.go | 8 +- cache/refs.go | 5 +- cache/remotecache/gha/gha.go | 43 +- client/build_test.go | 45 +- client/client.go | 22 +- client/client_test.go | 108 +- client/llb/exec.go | 79 +- client/llb/meta.go | 36 +- 
client/llb/state.go | 4 +- client/solve.go | 6 +- cmd/buildkitd/main.go | 11 +- docs/multi-platform.md | 43 - executor/executor.go | 2 +- executor/oci/mounts.go | 14 - executor/oci/mounts_test.go | 123 +- executor/oci/spec.go | 18 +- executor/oci/spec_unix.go | 25 - executor/oci/spec_windows.go | 7 - frontend/dockerfile/builder/build.go | 32 - .../cmd/dockerfile-frontend/hack/release | 19 +- frontend/dockerfile/dockerfile2llb/convert.go | 17 +- .../dockerfile2llb/convert_runmount.go | 4 +- frontend/dockerfile/dockerfile_mount_test.go | 44 - .../dockerfile/dockerfile_runsecurity_test.go | 3 +- frontend/dockerfile/dockerfile_test.go | 92 -- frontend/dockerfile/docs/syntax.md | 1 - .../instructions/commands_runmount.go | 12 - frontend/gateway/container.go | 4 +- go.mod | 11 +- go.sum | 6 +- hack/build_ci_first_pass | 15 +- hack/cross | 15 +- hack/images | 10 +- hack/release-tar | 9 +- hack/test | 15 +- hack/util | 25 +- session/filesync/diffcopy.go | 2 +- session/filesync/filesync.go | 6 +- solver/exporter.go | 20 +- solver/jobs.go | 2 - solver/llbsolver/mounts/mount.go | 21 +- solver/llbsolver/ops/exec.go | 7 +- solver/pb/caps.go | 43 +- solver/pb/ops.pb.go | 1375 ++++++----------- solver/pb/ops.proto | 25 +- source/git/gitsource.go | 1 + source/git/redact_credentials_go114.go | 30 - source/local/local.go | 13 +- util/contentutil/multiprovider.go | 20 - util/entitlements/security/security_linux.go | 118 +- util/resolver/authorizer.go | 2 +- util/resolver/limited/group.go | 84 +- .../github.com/dimchansky/utfbom/.gitignore | 37 - .../github.com/dimchansky/utfbom/.travis.yml | 29 - vendor/github.com/dimchansky/utfbom/LICENSE | 201 --- vendor/github.com/dimchansky/utfbom/README.md | 66 - vendor/github.com/dimchansky/utfbom/utfbom.go | 192 --- .../tonistiigi/go-actions-cache/LICENSE | 21 - .../tonistiigi/go-actions-cache/cache.go | 100 +- .../tonistiigi/go-actions-cache/retry.go | 108 -- vendor/modules.txt | 5 +- worker/base/worker.go | 19 - 70 files changed, 968 
insertions(+), 2730 deletions(-) delete mode 100644 .github/workflows/ci.yml delete mode 100644 docs/multi-platform.md delete mode 100644 source/git/redact_credentials_go114.go delete mode 100644 vendor/github.com/dimchansky/utfbom/.gitignore delete mode 100644 vendor/github.com/dimchansky/utfbom/.travis.yml delete mode 100644 vendor/github.com/dimchansky/utfbom/LICENSE delete mode 100644 vendor/github.com/dimchansky/utfbom/README.md delete mode 100644 vendor/github.com/dimchansky/utfbom/utfbom.go delete mode 100644 vendor/github.com/tonistiigi/go-actions-cache/LICENSE delete mode 100644 vendor/github.com/tonistiigi/go-actions-cache/retry.go diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e027870b4de9..81add875279d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,9 +20,9 @@ env: REPO_SLUG_TARGET: "moby/buildkit" DF_REPO_SLUG_TARGET: "docker/dockerfile-upstream" PLATFORMS: "linux/amd64,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le,linux/riscv64" - CACHE_GHA_SCOPE_IT: "integration-tests" - CACHE_GHA_SCOPE_BINARIES: "binaries" - CACHE_GHA_SCOPE_CROSS: "cross" + CACHEKEY_INTEGRATION_TESTS: "integration-tests" + CACHEKEY_BINARIES: "binaries" + CACHEKEY_CROSS: "cross" jobs: base: @@ -32,8 +32,21 @@ jobs: name: Checkout uses: actions/checkout@v2 - - name: Expose GitHub Runtime - uses: crazy-max/ghaction-github-runtime@v1 + name: Cache ${{ env.CACHEKEY_INTEGRATION_TESTS }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/${{ env.CACHEKEY_INTEGRATION_TESTS }} + key: ${{ runner.os }}-buildkit-${{ env.CACHEKEY_INTEGRATION_TESTS }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-${{ env.CACHEKEY_INTEGRATION_TESTS }}- + - + name: Cache ${{ env.CACHEKEY_BINARIES }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }} + key: ${{ runner.os }}-buildkit-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-${{ 
env.CACHEKEY_BINARIES }}- - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -42,21 +55,28 @@ jobs: uses: docker/setup-buildx-action@v1 with: driver-opts: image=${{ env.REPO_SLUG_ORIGIN }} - buildkitd-flags: --debug - - name: Build ${{ env.CACHE_GHA_SCOPE_BINARIES }} + name: Build ${{ env.CACHEKEY_BINARIES }} run: | ./hack/build_ci_first_pass binaries env: - CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }} - CACHE_TO: type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }} + CACHEDIR_FROM: /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }} + CACHEDIR_TO: /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }}-new - - name: Build ${{ env.CACHE_GHA_SCOPE_IT }} + name: Build ${{ env.CACHEKEY_INTEGRATION_TESTS }} run: | ./hack/build_ci_first_pass integration-tests env: - CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} - CACHE_TO: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} + CACHEDIR_FROM: /tmp/.buildkit-cache/${{ env.CACHEKEY_INTEGRATION_TESTS }} + CACHEDIR_TO: /tmp/.buildkit-cache/${{ env.CACHEKEY_INTEGRATION_TESTS }}-new + - + # FIXME: Temp fix for https://github.com/moby/buildkit/issues/1850 + name: Move cache + run: | + rm -rf /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }} + mv /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }}-new /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }} + rm -rf /tmp/.buildkit-cache/${{ env.CACHEKEY_INTEGRATION_TESTS }} + mv /tmp/.buildkit-cache/${{ env.CACHEKEY_INTEGRATION_TESTS }}-new /tmp/.buildkit-cache/${{ env.CACHEKEY_INTEGRATION_TESTS }} test: runs-on: ubuntu-latest @@ -89,8 +109,21 @@ jobs: name: Checkout uses: actions/checkout@v2 - - name: Expose GitHub Runtime - uses: crazy-max/ghaction-github-runtime@v1 + name: Cache ${{ env.CACHEKEY_INTEGRATION_TESTS }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/${{ env.CACHEKEY_INTEGRATION_TESTS }} + key: ${{ runner.os }}-buildkit-${{ env.CACHEKEY_INTEGRATION_TESTS }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-${{ 
env.CACHEKEY_INTEGRATION_TESTS }}- + - + name: Cache ${{ env.CACHEKEY_BINARIES }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }} + key: ${{ runner.os }}-buildkit-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-${{ env.CACHEKEY_BINARIES }}- - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -99,7 +132,6 @@ jobs: uses: docker/setup-buildx-action@v1 with: driver-opts: image=${{ env.REPO_SLUG_ORIGIN }} - buildkitd-flags: --debug - name: Test pkg=${{ matrix.pkg }} ; typ=${{ matrix.typ }} ; skipit=${{ matrix.skip-integration-tests }} ; worker=${{ matrix.worker }} run: | @@ -113,7 +145,7 @@ jobs: TEST_COVERAGE: 1 TESTPKGS: ${{ matrix.pkg }} SKIP_INTEGRATION_TESTS: ${{ matrix.skip-integration-tests }} - CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }} + CACHEDIR_FROM: /tmp/.buildkit-cache/${{ env.CACHEKEY_INTEGRATION_TESTS }} /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }} - name: Upload coverage file uses: actions/upload-artifact@v2 @@ -204,8 +236,13 @@ jobs: name: Checkout uses: actions/checkout@v2 - - name: Expose GitHub Runtime - uses: crazy-max/ghaction-github-runtime@v1 + name: Cache ${{ env.CACHEKEY_CROSS }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }} + key: ${{ runner.os }}-buildkit-${{ env.CACHEKEY_CROSS }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-${{ env.CACHEKEY_CROSS }}- - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -214,7 +251,6 @@ jobs: uses: docker/setup-buildx-action@v1 with: driver-opts: image=${{ env.REPO_SLUG_ORIGIN }} - buildkitd-flags: --debug - name: Cross run: | @@ -222,8 +258,14 @@ jobs: env: PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64 RUNC_PLATFORMS: ${{ env.PLATFORMS }} - CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }} - CACHE_TO: type=gha,scope=${{ 
env.CACHE_GHA_SCOPE_CROSS }} + CACHEDIR_FROM: /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }} + CACHEDIR_TO: /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }}-new + - + # FIXME: Temp fix for https://github.com/moby/buildkit/issues/1850 + name: Move cache + run: | + rm -rf /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }} + mv /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }}-new /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }} release-base: runs-on: ubuntu-latest @@ -263,8 +305,21 @@ jobs: name: Checkout uses: actions/checkout@v2 - - name: Expose GitHub Runtime - uses: crazy-max/ghaction-github-runtime@v1 + name: Cache image${{ matrix.target-stage }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/image${{ matrix.target-stage }} + key: ${{ runner.os }}-buildkit-image${{ matrix.target-stage }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-image${{ matrix.target-stage }}- + - + name: Cache ${{ env.CACHEKEY_CROSS }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }} + key: ${{ runner.os }}-buildkit-${{ env.CACHEKEY_CROSS }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-${{ env.CACHEKEY_CROSS }}- - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -273,7 +328,6 @@ jobs: uses: docker/setup-buildx-action@v1 with: driver-opts: image=${{ env.REPO_SLUG_ORIGIN }} - buildkitd-flags: --debug - name: Login to DockerHub if: needs.release-base.outputs.push == 'push' @@ -287,8 +341,14 @@ jobs: ./hack/images "${{ needs.release-base.outputs.tag }}" "$REPO_SLUG_TARGET" "${{ needs.release-base.outputs.push }}" env: TARGET: ${{ matrix.target-stage }} - CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }} type=gha,scope=image${{ matrix.target-stage }} - CACHE_TO: type=gha,scope=image${{ matrix.target-stage }} + CACHEDIR_FROM: /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }} /tmp/.buildkit-cache/image${{ matrix.target-stage }} + CACHEDIR_TO: /tmp/.buildkit-cache/image${{ matrix.target-stage }}-new + - + 
# FIXME: Temp fix for https://github.com/moby/buildkit/issues/1850 + name: Move cache + run: | + rm -rf /tmp/.buildkit-cache/image${{ matrix.target-stage }} + mv /tmp/.buildkit-cache/image${{ matrix.target-stage }}-new /tmp/.buildkit-cache/image${{ matrix.target-stage }} binaries: runs-on: ubuntu-latest @@ -298,8 +358,21 @@ jobs: name: Checkout uses: actions/checkout@v2 - - name: Expose GitHub Runtime - uses: crazy-max/ghaction-github-runtime@v1 + name: Cache ${{ env.CACHEKEY_BINARIES }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }} + key: ${{ runner.os }}-buildkit-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-${{ env.CACHEKEY_BINARIES }}- + - + name: Cache ${{ env.CACHEKEY_CROSS }} + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }} + key: ${{ runner.os }}-buildkit-${{ env.CACHEKEY_CROSS }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-${{ env.CACHEKEY_CROSS }}- - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -308,14 +381,13 @@ jobs: uses: docker/setup-buildx-action@v1 with: driver-opts: image=${{ env.REPO_SLUG_ORIGIN }} - buildkitd-flags: --debug - name: Build ${{ needs.release-base.outputs.tag }} run: | ./hack/release-tar "${{ needs.release-base.outputs.tag }}" release-out env: PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64 - CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }} + CACHEDIR_FROM: /tmp/.buildkit-cache/${{ env.CACHEKEY_BINARIES }} /tmp/.buildkit-cache/${{ env.CACHEKEY_CROSS }} - name: Move artifacts run: | @@ -372,9 +444,6 @@ jobs: - name: Checkout uses: actions/checkout@v2 - - - name: Expose GitHub Runtime - uses: crazy-max/ghaction-github-runtime@v1 - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -383,7 +452,14 @@ jobs: uses: docker/setup-buildx-action@v1 with: driver-opts: image=${{ 
env.REPO_SLUG_ORIGIN }} - buildkitd-flags: --debug + - + name: Cache layers + uses: actions/cache@v2 + with: + path: /tmp/.buildkit-cache/frontend-${{ needs.frontend-base.outputs.typ }} + key: ${{ runner.os }}-buildkit-frontend-${{ needs.frontend-base.outputs.typ }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildkit-frontend-${{ needs.frontend-base.outputs.typ }}- - name: Login to DockerHub uses: docker/login-action@v1 @@ -396,12 +472,18 @@ jobs: run: | ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" "${{ needs.frontend-base.outputs.tag }}" "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}" env: - CACHE_FROM: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }} - CACHE_TO: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }} + CACHEDIR_FROM: /tmp/.buildkit-cache/frontend-${{ needs.frontend-base.outputs.typ }} + CACHEDIR_TO: /tmp/.buildkit-cache/frontend-${{ needs.frontend-base.outputs.typ }}-new - name: Build ${{ needs.frontend-base.outputs.typ }}/labs if: needs.frontend-base.outputs.typ == 'master' run: | ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" labs "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}" env: - CACHE_FROM: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }} + CACHEDIR_FROM: /tmp/.buildkit-cache/frontend-${{ needs.frontend-base.outputs.typ }} + - + # FIXME: Temp fix for https://github.com/moby/buildkit/issues/1850 + name: Move cache + run: | + rm -rf /tmp/.buildkit-cache/frontend-${{ needs.frontend-base.outputs.typ }} + mv /tmp/.buildkit-cache/frontend-${{ needs.frontend-base.outputs.typ }}-new /tmp/.buildkit-cache/frontend-${{ needs.frontend-base.outputs.typ }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 4803951c2962..000000000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: CI - -on: - push: - 
branches: [ earthly-main ] - pull_request: - branches: [ earthly-main ] - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Put back the git branch into git (Earthly uses it for tagging) - run: | - branch="" - if [ -n "$GITHUB_HEAD_REF" ]; then - branch="$GITHUB_HEAD_REF" - else - branch="${GITHUB_REF##*/}" - fi - git checkout -b "$branch" || true - - name: Download released earth - run: "sudo /bin/sh -c 'wget https://github.com/earthly/earthly/releases/latest/download/earthly-linux-amd64 -O /usr/local/bin/earthly && chmod +x /usr/local/bin/earthly'" - - name: Docker Login - run: docker login --username "${{ secrets.DOCKERHUB_USERNAME }}" --password "${{ secrets.DOCKERHUB_TOKEN }}" - - name: Build - run: earthly +build diff --git a/Earthfile b/Earthfile index 7e7894661721..fba27875543b 100644 --- a/Earthfile +++ b/Earthfile @@ -3,8 +3,7 @@ FROM alpine:3.13 WORKDIR /buildkit build: - ARG DOCKER_TARGET=buildkit-buildkitd-linux - FROM DOCKERFILE --target $DOCKER_TARGET . + FROM DOCKERFILE --target buildkit-buildkitd-linux . code: COPY . . diff --git a/README.md b/README.md index 4e1bd4faadb9..0f87a1beb7d7 100644 --- a/README.md +++ b/README.md @@ -547,7 +547,7 @@ Please refer to [`docs/rootless.md`](docs/rootless.md). ## Building multi-platform images -Please refer to [`docs/multi-platform.md`](docs/multi-platform.md). 
+See [`docker buildx` documentation](https://github.com/docker/buildx#building-multi-platform-images) ## Contributing diff --git a/cache/blobs.go b/cache/blobs.go index 7583caaab038..ff70495edc55 100644 --- a/cache/blobs.go +++ b/cache/blobs.go @@ -39,7 +39,7 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo return errors.Errorf("missing lease requirement for computeBlobChain") } - if err := sr.Finalize(ctx); err != nil { + if err := sr.finalizeLocked(ctx); err != nil { return err } @@ -126,13 +126,8 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool } else if !isTypeWindows(sr) { enableOverlay, fallback = true, true switch sr.cm.ManagerOpt.Snapshotter.Name() { - case "overlayfs", "stargz": - // overlayfs-based snapshotters should support overlay diff. so print warn log on failure. - logWarnOnErr = true - case "fuse-overlayfs": - // not supported with fuse-overlayfs snapshotter which doesn't provide overlayfs mounts. - // TODO: add support for fuse-overlayfs - enableOverlay = false + case "overlayfs", "fuse-overlayfs", "stargz": + logWarnOnErr = true // snapshotter should support overlay diff. 
so print warn log on failure } } if enableOverlay { diff --git a/cache/blobs_linux.go b/cache/blobs_linux.go index b614918b4ba9..614895f234d2 100644 --- a/cache/blobs_linux.go +++ b/cache/blobs_linux.go @@ -4,7 +4,6 @@ package cache import ( - "bufio" "bytes" "context" "fmt" @@ -72,11 +71,10 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper } }() - bufW := bufio.NewWriterSize(cw, 128*1024) var labels map[string]string if compressorFunc != nil { dgstr := digest.SHA256.Digester() - compressed, err := compressorFunc(bufW, mediaType) + compressed, err := compressorFunc(cw, mediaType) if err != nil { return emptyDesc, false, errors.Wrap(err, "failed to get compressed stream") } @@ -90,14 +88,11 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper } labels[containerdUncompressed] = dgstr.Digest().String() } else { - if err = writeOverlayUpperdir(ctx, bufW, upperdir, lower); err != nil { + if err = writeOverlayUpperdir(ctx, cw, upperdir, lower); err != nil { return emptyDesc, false, errors.Wrap(err, "failed to write diff") } } - if err := bufW.Flush(); err != nil { - return emptyDesc, false, errors.Wrap(err, "failed to flush diff") - } var commitopts []content.Opt if labels != nil { commitopts = append(commitopts, content.WithLabels(labels)) diff --git a/cache/converter.go b/cache/converter.go index 4bc2dc65cc49..d2bfd6ae32f6 100644 --- a/cache/converter.go +++ b/cache/converter.go @@ -191,13 +191,16 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec newDesc.MediaType = c.target.DefaultMediaType() newDesc.Digest = info.Digest newDesc.Size = info.Size - newDesc.Annotations = map[string]string{labels.LabelUncompressed: diffID.Digest().String()} + newDesc.Annotations = nil if c.finalize != nil { a, err := c.finalize(ctx, cs) if err != nil { return nil, errors.Wrapf(err, "failed finalize compression") } for k, v := range a { + if newDesc.Annotations == nil { + newDesc.Annotations = 
make(map[string]string) + } newDesc.Annotations[k] = v } } diff --git a/cache/manager.go b/cache/manager.go index 8ddcfe8dfbc0..e63879975fb7 100644 --- a/cache/manager.go +++ b/cache/manager.go @@ -129,7 +129,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor, chainID = imagespecidentity.ChainID([]digest.Digest{p.getChainID(), chainID}) blobChainID = imagespecidentity.ChainID([]digest.Digest{p.getBlobChainID(), blobChainID}) - if err := p.Finalize(ctx); err != nil { + if err := p.finalizeLocked(ctx); err != nil { p.Release(context.TODO()) return nil, err } @@ -467,7 +467,7 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Gr } parent = p.(*immutableRef) } - if err := parent.Finalize(ctx); err != nil { + if err := parent.finalizeLocked(ctx); err != nil { return nil, err } if err := parent.Extract(ctx, sess); err != nil { diff --git a/cache/manager_test.go b/cache/manager_test.go index 12cfca90d99c..b14ebd8954d2 100644 --- a/cache/manager_test.go +++ b/cache/manager_test.go @@ -226,7 +226,7 @@ func TestManager(t *testing.T) { checkDiskUsage(ctx, t, cm, 1, 0) - err = snap.Finalize(ctx) + err = snap.(*immutableRef).finalizeLocked(ctx) require.NoError(t, err) err = snap.Release(ctx) @@ -948,7 +948,7 @@ func TestLazyCommit(t *testing.T) { require.NoError(t, err) // this time finalize commit - err = snap.Finalize(ctx) + err = snap.(*immutableRef).finalizeLocked(ctx) require.NoError(t, err) err = snap.Release(ctx) @@ -1022,7 +1022,7 @@ func TestLazyCommit(t *testing.T) { snap2, err = cm.Get(ctx, snap.ID()) require.NoError(t, err) - err = snap2.Finalize(ctx) + err = snap2.(*immutableRef).finalizeLocked(ctx) require.NoError(t, err) err = snap2.Release(ctx) @@ -1115,8 +1115,6 @@ func TestConversion(t *testing.T) { require.NoError(t, err, testName) } require.Equal(t, recreatedDesc.Digest, orgDesc.Digest, testName) - require.NotNil(t, recreatedDesc.Annotations) - require.Equal(t, 
recreatedDesc.Annotations["containerd.io/uncompressed"], orgDesc.Digest.String(), testName) return nil }) } diff --git a/cache/refs.go b/cache/refs.go index e0d83c15159d..801e249507cb 100644 --- a/cache/refs.go +++ b/cache/refs.go @@ -40,9 +40,6 @@ type ImmutableRef interface { Ref Parent() ImmutableRef Clone() ImmutableRef - // Finalize commits the snapshot to the driver if it's not already. - // This means the snapshot can no longer be mounted as mutable. - Finalize(context.Context) error Extract(ctx context.Context, s session.Group) error // +progress GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, forceCompression bool, s session.Group) (*solver.Remote, error) @@ -821,7 +818,7 @@ func (sr *immutableRef) release(ctx context.Context) error { return nil } -func (sr *immutableRef) Finalize(ctx context.Context) error { +func (sr *immutableRef) finalizeLocked(ctx context.Context) error { sr.mu.Lock() defer sr.mu.Unlock() return sr.finalize(ctx) diff --git a/cache/remotecache/gha/gha.go b/cache/remotecache/gha/gha.go index a8fded568f49..bde0646995f5 100644 --- a/cache/remotecache/gha/gha.go +++ b/cache/remotecache/gha/gha.go @@ -5,8 +5,6 @@ import ( "context" "encoding/json" "fmt" - "os" - "sync" "time" "github.com/containerd/containerd/content" @@ -143,9 +141,7 @@ func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) { return nil, layerDone(err) } if err := ce.cache.Save(ctx, key, ra); err != nil { - if !errors.Is(err, os.ErrExist) { - return nil, layerDone(errors.Wrap(err, "error writing layer blob")) - } + return nil, layerDone(errors.Wrap(err, "error writing layer blob")) } layerDone(nil) } @@ -309,47 +305,18 @@ func (ci *importer) Resolve(ctx context.Context, _ ocispecs.Descriptor, id strin } type ciProvider struct { - ci *importer - desc ocispecs.Descriptor - mu sync.Mutex - entries map[digest.Digest]*actionscache.Entry -} - -func (p *ciProvider) CheckDescriptor(ctx context.Context, desc 
ocispecs.Descriptor) error { - if desc.Digest != p.desc.Digest { - return nil - } - - _, err := p.loadEntry(ctx, desc) - return err + desc ocispecs.Descriptor + ci *importer } -func (p *ciProvider) loadEntry(ctx context.Context, desc ocispecs.Descriptor) (*actionscache.Entry, error) { - p.mu.Lock() - defer p.mu.Unlock() - - if ce, ok := p.entries[desc.Digest]; ok { - return ce, nil - } +func (p *ciProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { key := "buildkit-blob-" + version + "-" + desc.Digest.String() ce, err := p.ci.cache.Load(ctx, key) if err != nil { return nil, err } if ce == nil { - return nil, errors.Errorf("blob %s not found", desc.Digest) - } - if p.entries == nil { - p.entries = make(map[digest.Digest]*actionscache.Entry) - } - p.entries[desc.Digest] = ce - return ce, nil -} - -func (p *ciProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { - ce, err := p.loadEntry(ctx, desc) - if err != nil { - return nil, err + return nil, errors.Errorf("blob not found") } rac := ce.Download(context.TODO()) return &readerAt{ReaderAtCloser: rac, desc: desc}, nil diff --git a/client/build_test.go b/client/build_test.go index 72f7ef5de248..d9a0f0b50c99 100644 --- a/client/build_test.go +++ b/client/build_test.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "os" "path/filepath" - "strconv" "strings" "testing" "time" @@ -1519,36 +1518,29 @@ func testClientGatewayContainerSecurityMode(t *testing.T, sb integration.Sandbox product := "buildkit_test" - command := []string{"sh", "-c", `cat /proc/self/status | grep CapEff | cut -f 2`} + var command []string mode := llb.SecurityModeSandbox var allowedEntitlements []entitlements.Entitlement - var assertCaps func(caps uint64) secMode := sb.Value("secmode") if secMode == securitySandbox { - assertCaps = func(caps uint64) { - /* - $ capsh --decode=00000000a80425fb - 
0x00000000a80425fb=cap_chown,cap_dac_override,cap_fowner,cap_fsetid,cap_kill,cap_setgid,cap_setuid,cap_setpcap, - cap_net_bind_service,cap_net_raw,cap_sys_chroot,cap_mknod,cap_audit_write,cap_setfcap - */ - require.EqualValues(t, 0xa80425fb, caps) - } + /* + $ capsh --decode=00000000a80425fb + 0x00000000a80425fb=cap_chown,cap_dac_override,cap_fowner,cap_fsetid,cap_kill,cap_setgid,cap_setuid,cap_setpcap, + cap_net_bind_service,cap_net_raw,cap_sys_chroot,cap_mknod,cap_audit_write,cap_setfcap + */ + command = []string{"sh", "-c", `cat /proc/self/status | grep CapEff | grep "00000000a80425fb"`} allowedEntitlements = []entitlements.Entitlement{} } else { skipDockerd(t, sb) - assertCaps = func(caps uint64) { - /* - $ capsh --decode=0000003fffffffff - 0x0000003fffffffff=cap_chown,cap_dac_override,cap_dac_read_search,cap_fowner,cap_fsetid,cap_kill,cap_setgid, - cap_setuid,cap_setpcap,cap_linux_immutable,cap_net_bind_service,cap_net_broadcast,cap_net_admin,cap_net_raw, - cap_ipc_lock,cap_ipc_owner,cap_sys_module,cap_sys_rawio,cap_sys_chroot,cap_sys_ptrace,cap_sys_pacct,cap_sys_admin, - cap_sys_boot,cap_sys_nice,cap_sys_resource,cap_sys_time,cap_sys_tty_config,cap_mknod,cap_lease,cap_audit_write, - cap_audit_control,cap_setfcap,cap_mac_override,cap_mac_admin,cap_syslog,cap_wake_alarm,cap_block_suspend,cap_audit_read - */ - - // require that _at least_ minimum capabilities are granted - require.EqualValues(t, 0x3fffffffff, caps&0x3fffffffff) - } + /* + $ capsh --decode=0000003fffffffff + 0x0000003fffffffff=cap_chown,cap_dac_override,cap_dac_read_search,cap_fowner,cap_fsetid,cap_kill,cap_setgid, + cap_setuid,cap_setpcap,cap_linux_immutable,cap_net_bind_service,cap_net_broadcast,cap_net_admin,cap_net_raw, + cap_ipc_lock,cap_ipc_owner,cap_sys_module,cap_sys_rawio,cap_sys_chroot,cap_sys_ptrace,cap_sys_pacct,cap_sys_admin, + cap_sys_boot,cap_sys_nice,cap_sys_resource,cap_sys_time,cap_sys_tty_config,cap_mknod,cap_lease,cap_audit_write, + 
cap_audit_control,cap_setfcap,cap_mac_override,cap_mac_admin,cap_syslog,cap_wake_alarm,cap_block_suspend,cap_audit_read + */ + command = []string{"sh", "-c", `cat /proc/self/status | grep CapEff | grep "0000003fffffffff"`} mode = llb.SecurityModeInsecure allowedEntitlements = []entitlements.Entitlement{entitlements.EntitlementSecurityInsecure} } @@ -1602,11 +1594,6 @@ func testClientGatewayContainerSecurityMode(t *testing.T, sb integration.Sandbox require.NoError(t, err) - capsValue, err := strconv.ParseUint(strings.TrimSpace(stdout.String()), 16, 64) - require.NoError(t, err) - - assertCaps(capsValue) - return &client.Result{}, nil } diff --git a/client/client.go b/client/client.go index b42a2c65ccbd..45562ddc30e3 100644 --- a/client/client.go +++ b/client/client.go @@ -29,8 +29,7 @@ import ( ) type Client struct { - conn *grpc.ClientConn - sessionDialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + conn *grpc.ClientConn } type ClientOpt interface{} @@ -38,8 +37,8 @@ type ClientOpt interface{} // New returns a new buildkit client. Address can be empty for the system-default address. 
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) { gopts := []grpc.DialOption{ - grpc.WithInitialWindowSize(65535 * 32), - grpc.WithInitialConnWindowSize(65535 * 16), + grpc.WithInitialWindowSize(65535 * 32), //earthly + grpc.WithInitialConnWindowSize(65535 * 16), //earthly grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)), } @@ -52,7 +51,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error var customTracer bool // allows manually setting disabling tracing even if tracer in context var tracerProvider trace.TracerProvider var tracerDelegate TracerDelegate - var sessionDialer func(context.Context, string, map[string][]string) (net.Conn, error) for _, o := range opts { if _, ok := o.(*withFailFast); ok { @@ -77,9 +75,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error if wt, ok := o.(*withTracerDelegate); ok { tracerDelegate = wt } - if sd, ok := o.(*withSessionDialer); ok { - sessionDialer = sd.dialer - } } if !customTracer { @@ -138,8 +133,7 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error } c := &Client{ - conn: conn, - sessionDialer: sessionDialer, + conn: conn, } if tracerDelegate != nil { @@ -252,14 +246,6 @@ type withTracerDelegate struct { TracerDelegate } -func WithSessionDialer(dialer func(context.Context, string, map[string][]string) (net.Conn, error)) ClientOpt { - return &withSessionDialer{dialer} -} - -type withSessionDialer struct { - dialer func(context.Context, string, map[string][]string) (net.Conn, error) -} - func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) { ch, err := connhelper.GetConnectionHelper(address) if err != nil { diff --git a/client/client_test.go b/client/client_test.go index 9a1c6c261d71..02c83d8138c4 100644 --- a/client/client_test.go +++ 
b/client/client_test.go @@ -17,7 +17,6 @@ import ( "os" "path/filepath" "runtime" - "strconv" "strings" "syscall" "testing" @@ -109,7 +108,6 @@ func TestIntegration(t *testing.T) { testSecretMounts, testExtraHosts, testShmSize, - testUlimit, testNetworkMode, testFrontendMetadataReturn, testFrontendUseSolveResults, @@ -536,10 +534,8 @@ func testShmSize(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) defer c.Close() - st := llb.Image("busybox:latest").Run( - llb.AddMount("/dev/shm", llb.Scratch(), llb.Tmpfs(llb.TmpfsSize(128*1024*1024))), - llb.Shlex(`sh -c 'mount | grep /dev/shm > /out/out'`), - ) + st := llb.Image("busybox:latest"). + Run(llb.Shlex(`sh -c 'mount | grep /dev/shm > /out/out'`), llb.WithShmSize(128*1024)) out := st.AddMount("/out", llb.Scratch()) def, err := out.Marshal(sb.Context()) @@ -564,47 +560,6 @@ func testShmSize(t *testing.T, sb integration.Sandbox) { require.Contains(t, string(dt), `size=131072k`) } -func testUlimit(t *testing.T, sb integration.Sandbox) { - c, err := New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string, ro ...llb.RunOption) { - st = busybox.Run(append(ro, llb.Shlex(cmd), llb.Dir("/wd"))...).AddMount("/wd", st) - } - - run(`sh -c "ulimit -n > first"`, llb.AddUlimit(llb.UlimitNofile, 1062, 1062)) - run(`sh -c "ulimit -n > second"`) - - def, err := st.Marshal(sb.Context()) - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "first")) - require.NoError(t, err) - require.Equal(t, `1062`, strings.TrimSpace(string(dt))) - - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "second")) - 
require.NoError(t, err) - require.NotEqual(t, `1062`, strings.TrimSpace(string(dt2))) -} - func testNetworkMode(t *testing.T, sb integration.Sandbox) { c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -679,36 +634,29 @@ func testPushByDigest(t *testing.T, sb integration.Sandbox) { } func testSecurityMode(t *testing.T, sb integration.Sandbox) { - command := `sh -c 'cat /proc/self/status | grep CapEff | cut -f 2 > /out'` + var command string mode := llb.SecurityModeSandbox var allowedEntitlements []entitlements.Entitlement - var assertCaps func(caps uint64) secMode := sb.Value("secmode") if secMode == securitySandbox { - assertCaps = func(caps uint64) { - /* - $ capsh --decode=00000000a80425fb - 0x00000000a80425fb=cap_chown,cap_dac_override,cap_fowner,cap_fsetid,cap_kill,cap_setgid,cap_setuid,cap_setpcap, - cap_net_bind_service,cap_net_raw,cap_sys_chroot,cap_mknod,cap_audit_write,cap_setfcap - */ - require.EqualValues(t, 0xa80425fb, caps) - } + /* + $ capsh --decode=00000000a80425fb + 0x00000000a80425fb=cap_chown,cap_dac_override,cap_fowner,cap_fsetid,cap_kill,cap_setgid,cap_setuid,cap_setpcap, + cap_net_bind_service,cap_net_raw,cap_sys_chroot,cap_mknod,cap_audit_write,cap_setfcap + */ + command = `sh -c 'cat /proc/self/status | grep CapEff | grep "00000000a80425fb"'` allowedEntitlements = []entitlements.Entitlement{} } else { skipDockerd(t, sb) - assertCaps = func(caps uint64) { - /* - $ capsh --decode=0000003fffffffff - 0x0000003fffffffff=cap_chown,cap_dac_override,cap_dac_read_search,cap_fowner,cap_fsetid,cap_kill,cap_setgid, - cap_setuid,cap_setpcap,cap_linux_immutable,cap_net_bind_service,cap_net_broadcast,cap_net_admin,cap_net_raw, - cap_ipc_lock,cap_ipc_owner,cap_sys_module,cap_sys_rawio,cap_sys_chroot,cap_sys_ptrace,cap_sys_pacct,cap_sys_admin, - cap_sys_boot,cap_sys_nice,cap_sys_resource,cap_sys_time,cap_sys_tty_config,cap_mknod,cap_lease,cap_audit_write, - 
cap_audit_control,cap_setfcap,cap_mac_override,cap_mac_admin,cap_syslog,cap_wake_alarm,cap_block_suspend,cap_audit_read - */ - - // require that _at least_ minimum capabilities are granted - require.EqualValues(t, 0x3fffffffff, caps&0x3fffffffff) - } + /* + $ capsh --decode=0000003fffffffff + 0x0000003fffffffff=cap_chown,cap_dac_override,cap_dac_read_search,cap_fowner,cap_fsetid,cap_kill,cap_setgid, + cap_setuid,cap_setpcap,cap_linux_immutable,cap_net_bind_service,cap_net_broadcast,cap_net_admin,cap_net_raw, + cap_ipc_lock,cap_ipc_owner,cap_sys_module,cap_sys_rawio,cap_sys_chroot,cap_sys_ptrace,cap_sys_pacct,cap_sys_admin, + cap_sys_boot,cap_sys_nice,cap_sys_resource,cap_sys_time,cap_sys_tty_config,cap_mknod,cap_lease,cap_audit_write, + cap_audit_control,cap_setfcap,cap_mac_override,cap_mac_admin,cap_syslog,cap_wake_alarm,cap_block_suspend,cap_audit_read + */ + command = `sh -c 'cat /proc/self/status | grep CapEff | grep "0000003fffffffff"'` mode = llb.SecurityModeInsecure allowedEntitlements = []entitlements.Entitlement{entitlements.EntitlementSecurityInsecure} } @@ -724,31 +672,11 @@ func testSecurityMode(t *testing.T, sb integration.Sandbox) { def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, AllowedEntitlements: allowedEntitlements, }, nil) require.NoError(t, err) - - contents, err := ioutil.ReadFile(filepath.Join(destDir, "out")) - require.NoError(t, err) - - caps, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 16, 64) - require.NoError(t, err) - - t.Logf("Caps: %x", caps) - - assertCaps(caps) } func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) { diff --git a/client/llb/exec.go b/client/llb/exec.go index f1823bbb700c..363d2f48e0c3 100644 --- a/client/llb/exec.go +++ b/client/llb/exec.go 
@@ -43,8 +43,7 @@ type mount struct { selector string cacheID string tmpfs bool - tmpfsOpt TmpfsInfo - hostBind bool + hostBind bool //earthly cacheSharing CacheMountSharingMode noOutput bool } @@ -207,21 +206,12 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] meta.ExtraHosts = hosts } - ulimits, err := getUlimit(e.base)(ctx, c) + shmSize, err := getShmSize(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } - if len(ulimits) > 0 { - addCap(&e.constraints, pb.CapExecMetaUlimit) - ul := make([]*pb.Ulimit, len(ulimits)) - for i, u := range ulimits { - ul[i] = &pb.Ulimit{ - Name: u.Name, - Soft: u.Soft, - Hard: u.Hard, - } - } - meta.Ulimit = ul + if shmSize != nil { + meta.ShmSize = *shmSize } network, err := getNetwork(e.base)(ctx, c) @@ -269,9 +259,6 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] addCap(&e.constraints, pb.CapExecMountCacheSharing) } else if m.tmpfs { addCap(&e.constraints, pb.CapExecMountTmpfs) - if m.tmpfsOpt.Size > 0 { - addCap(&e.constraints, pb.CapExecMountTmpfsSize) - } } else if m.source != nil { addCap(&e.constraints, pb.CapExecMountBind) } @@ -356,11 +343,8 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } if m.tmpfs { pm.MountType = pb.MountType_TMPFS - pm.TmpfsOpt = &pb.TmpfsOpt{ - Size_: m.tmpfsOpt.Size, - } } - if m.hostBind { + if m.hostBind { //earthly pm.MountType = pb.MountType_HOST_BIND } peo.Mounts = append(peo.Mounts, pm) @@ -482,38 +466,13 @@ func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption } } -func Tmpfs(opts ...TmpfsOption) MountOption { +func Tmpfs() MountOption { return func(m *mount) { - t := &TmpfsInfo{} - for _, opt := range opts { - opt.SetTmpfsOption(t) - } m.tmpfs = true - m.tmpfsOpt = *t } } -type TmpfsOption interface { - SetTmpfsOption(*TmpfsInfo) -} - -type tmpfsOptionFunc func(*TmpfsInfo) - -func (fn tmpfsOptionFunc) SetTmpfsOption(ti *TmpfsInfo) { - fn(ti) -} - 
-func TmpfsSize(b int64) TmpfsOption { - return tmpfsOptionFunc(func(ti *TmpfsInfo) { - ti.Size = b - }) -} - -type TmpfsInfo struct { - Size int64 -} - -func HostBind() MountOption { +func HostBind() MountOption { //earthly return func(m *mount) { m.hostBind = true } @@ -558,9 +517,9 @@ func AddExtraHost(host string, ip net.IP) RunOption { }) } -func AddUlimit(name UlimitName, soft int64, hard int64) RunOption { +func WithShmSize(kb int64) RunOption { return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.AddUlimit(name, soft, hard) + ei.State = ei.State.WithShmSize(kb) }) } @@ -733,23 +692,3 @@ const ( SecurityModeInsecure = pb.SecurityMode_INSECURE SecurityModeSandbox = pb.SecurityMode_SANDBOX ) - -type UlimitName string - -const ( - UlimitCore UlimitName = "core" - UlimitCPU UlimitName = "cpu" - UlimitData UlimitName = "data" - UlimitFsize UlimitName = "fsize" - UlimitLocks UlimitName = "locks" - UlimitMemlock UlimitName = "memlock" - UlimitMsgqueue UlimitName = "msgqueue" - UlimitNice UlimitName = "nice" - UlimitNofile UlimitName = "nofile" - UlimitNproc UlimitName = "nproc" - UlimitRss UlimitName = "rss" - UlimitRtprio UlimitName = "rtprio" - UlimitRttime UlimitName = "rttime" - UlimitSigpending UlimitName = "sigpending" - UlimitStack UlimitName = "stack" -) diff --git a/client/llb/meta.go b/client/llb/meta.go index 99c37b814371..4705bf1d24e6 100644 --- a/client/llb/meta.go +++ b/client/llb/meta.go @@ -18,14 +18,13 @@ var ( keyArgs = contextKeyT("llb.exec.args") keyDir = contextKeyT("llb.exec.dir") keyEnv = contextKeyT("llb.exec.env") - keyExtraHost = contextKeyT("llb.exec.extrahost") - keyHostname = contextKeyT("llb.exec.hostname") - keyUlimit = contextKeyT("llb.exec.ulimit") keyUser = contextKeyT("llb.exec.user") - - keyPlatform = contextKeyT("llb.platform") - keyNetwork = contextKeyT("llb.network") - keySecurity = contextKeyT("llb.security") + keyHostname = contextKeyT("llb.exec.hostname") + keyExtraHost = contextKeyT("llb.exec.extrahost") + 
keyShmSize = contextKeyT("llb.exec.shmsize") + keyPlatform = contextKeyT("llb.platform") + keyNetwork = contextKeyT("llb.network") + keySecurity = contextKeyT("llb.security") ) func AddEnvf(key, value string, v ...interface{}) StateOption { @@ -234,30 +233,21 @@ type HostIP struct { IP net.IP } -func ulimit(name UlimitName, soft int64, hard int64) StateOption { +func shmSize(kb int64) StateOption { return func(s State) State { - return s.withValue(keyUlimit, func(ctx context.Context, c *Constraints) (interface{}, error) { - v, err := getUlimit(s)(ctx, c) - if err != nil { - return nil, err - } - return append(v, pb.Ulimit{ - Name: string(name), - Soft: soft, - Hard: hard, - }), nil - }) + return s.WithValue(keyShmSize, kb) } } -func getUlimit(s State) func(context.Context, *Constraints) ([]pb.Ulimit, error) { - return func(ctx context.Context, c *Constraints) ([]pb.Ulimit, error) { - v, err := s.getValue(keyUlimit)(ctx, c) +func getShmSize(s State) func(context.Context, *Constraints) (*int64, error) { + return func(ctx context.Context, c *Constraints) (*int64, error) { + v, err := s.getValue(keyShmSize)(ctx, c) if err != nil { return nil, err } if v != nil { - return v.([]pb.Ulimit), nil + kb := v.(int64) + return &kb, nil } return nil, nil } diff --git a/client/llb/state.go b/client/llb/state.go index 408e7de310fd..a39265071946 100644 --- a/client/llb/state.go +++ b/client/llb/state.go @@ -397,8 +397,8 @@ func (s State) AddExtraHost(host string, ip net.IP) State { return extraHost(host, ip)(s) } -func (s State) AddUlimit(name UlimitName, soft int64, hard int64) State { - return ulimit(name, soft, hard)(s) +func (s State) WithShmSize(kb int64) State { + return shmSize(kb)(s) } func (s State) isFileOpCopyInput() {} diff --git a/client/solve.go b/client/solve.go index 6646520b30dd..f7e1891ac2c1 100644 --- a/client/solve.go +++ b/client/solve.go @@ -176,11 +176,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } eg.Go(func() error { 
- sd := c.sessionDialer - if sd == nil { - sd = grpchijack.Dialer(c.controlClient()) - } - return s.Run(statusContext, sd) + return s.Run(statusContext, grpchijack.Dialer(c.controlClient())) }) } diff --git a/cmd/buildkitd/main.go b/cmd/buildkitd/main.go index d987e7099790..60925925eaf8 100644 --- a/cmd/buildkitd/main.go +++ b/cmd/buildkitd/main.go @@ -23,6 +23,7 @@ import ( "github.com/containerd/containerd/sys" sddaemon "github.com/coreos/go-systemd/v22/daemon" "github.com/docker/docker/pkg/reexec" + "github.com/docker/go-connections/sockets" "github.com/gofrs/flock" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" "github.com/moby/buildkit/cache/remotecache" @@ -278,7 +279,7 @@ func main() { controller.Register(server) - // start earthly-specific registry server + // earthly specific lrPort, ok := os.LookupEnv("BUILDKIT_LOCAL_REGISTRY_LISTEN_PORT") if ok { logrus.Infof("Starting local registry for outputs on port %s", lrPort) @@ -561,16 +562,10 @@ func getListener(addr string, uid, gid int, tlsConfig *tls.Config) (net.Listener case "fd": return listenFD(listenAddr, tlsConfig) case "tcp": - l, err := net.Listen("tcp", listenAddr) - if err != nil { - return nil, err - } - if tlsConfig == nil { logrus.Warnf("TLS is not enabled for %s. enabling mutual TLS authentication is highly recommended", addr) - return l, nil } - return tls.NewListener(l, tlsConfig), nil + return sockets.NewTCPSocket(listenAddr, tlsConfig) default: return nil, errors.Errorf("addr %s not supported", addr) } diff --git a/docs/multi-platform.md b/docs/multi-platform.md deleted file mode 100644 index 8506e885b3ac..000000000000 --- a/docs/multi-platform.md +++ /dev/null @@ -1,43 +0,0 @@ -# Building multi-platform images - -| :zap: For building multi-platform images with `docker buildx`, see [the `docker buildx` documentation](https://github.com/docker/buildx#building-multi-platform-images). 
| -|--------------------------------------------------------------------------| - - - -BuildKit provides built-in support for building multi-platform by setting a comma-separated list of -[platform specifiers](https://github.com/containerd/containerd/blob/v1.5.7/platforms/platforms.go#L63) as `platform` option. - -```bash -buildctl build \ - --frontend dockerfile.v0 \ - --opt platform=linux/amd64,linux/arm64 \ - --output type=image,name=docker.io/username/image,push=true \ - ... -``` - -When your build needs to run a binary for architecture that is not supported natively by your host, it gets executed using a QEMU user-mode emulator. -You do not need to set up QEMU manually in most cases. - -## Troubleshooting - -### Error `exec user process caused: exec format error` - -You may face an error like `exec user process caused: exec format error`, mostly when you are using a third-party package of BuildKit that lacks -`buildkit-qemu-*` binaries. - -In such a case, you have to download the official binary release of BuildKit from https://github.com/moby/buildkit/releases , and install -the `buildkit-qemu-*` binaries in the release archive into the `$PATH` of the host. - -You may also face `exec format error` when the container contains mix of binaries for multiple architectures. - -In such a case, you have to register QEMU into `/proc/sys/fs/binfmt_misc` so that the kernel can execute foreign binaries using QEMU. - -QEMU is registered into `/proc/sys/fs/binfmt_misc` by default on Docker Desktop. -On other environments, the common way to register QEMU is to use `tonistiigi/binfmt` Docker image. - -```bash -docker run --privileged --rm tonistiigi/binfmt --install all -``` - -See also [`tonistiigi/binfmt` documentation](https://github.com/tonistiigi/binfmt/). 
diff --git a/executor/executor.go b/executor/executor.go index fd6f6cdafb49..5bca23f1146b 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -18,7 +18,7 @@ type Meta struct { Tty bool ReadonlyRootFS bool ExtraHosts []HostIP - Ulimit []*pb.Ulimit + ShmSize int64 NetMode pb.NetMode SecurityMode pb.SecurityMode } diff --git a/executor/oci/mounts.go b/executor/oci/mounts.go index 6f4d1367066c..62360f466334 100644 --- a/executor/oci/mounts.go +++ b/executor/oci/mounts.go @@ -100,17 +100,3 @@ func withBoundProc() oci.SpecOpts { return nil } } - -func dedupMounts(mnts []specs.Mount) []specs.Mount { - ret := make([]specs.Mount, 0, len(mnts)) - visited := make(map[string]int) - for i, mnt := range mnts { - if j, ok := visited[mnt.Destination]; ok { - ret[j] = mnt - } else { - visited[mnt.Destination] = i - ret = append(ret, mnt) - } - } - return ret -} diff --git a/executor/oci/mounts_test.go b/executor/oci/mounts_test.go index 5b3c473220d7..0e6900a83682 100644 --- a/executor/oci/mounts_test.go +++ b/executor/oci/mounts_test.go @@ -8,56 +8,8 @@ import ( "github.com/moby/buildkit/util/appcontext" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -// The default mount-list from containerd -// https://github.com/containerd/containerd/blob/main/oci/mounts.go -var containerdDefMounts = []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/dev/shm", - Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, - }, - { - Destination: 
"/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - { - Destination: "/run", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, - }, -} - func TestHasPrefix(t *testing.T) { type testCase struct { path string @@ -147,53 +99,56 @@ func TestHasPrefix(t *testing.T) { } func TestWithRemovedMounts(t *testing.T) { + // The default mount-list from containerd s := oci.Spec{ - Mounts: containerdDefMounts, - } - - oldLen := len(s.Mounts) - err := withRemovedMount("/run")(appcontext.Context(), nil, nil, &s) - assert.NoError(t, err) - assert.Equal(t, oldLen-1, len(s.Mounts)) -} - -func TestDedupMounts(t *testing.T) { - s := oci.Spec{ - Mounts: append(containerdDefMounts, []specs.Mount{ + Mounts: []specs.Mount{ { - Destination: "/dev/shm", + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "size=131072k"}, + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, }, { - Destination: "/foo", - Type: "bind", - Source: "/bar", - Options: []string{"nosuid", "noexec", "nodev", "rbind", "ro"}, + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, }, { Destination: "/dev/mqueue", Type: "mqueue", Source: "mqueue", - Options: []string{"nosuid"}, + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, }, - 
}...), + { + Destination: "/run", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + }, } - mntsLen := len(s.Mounts) - s.Mounts = dedupMounts(s.Mounts) - require.Equal(t, mntsLen-2, len(s.Mounts)) - assert.Equal(t, specs.Mount{ - Destination: "/dev/shm", - Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "size=131072k"}, - }, s.Mounts[3]) - assert.Equal(t, specs.Mount{ - Destination: "/foo", - Type: "bind", - Source: "/bar", - Options: []string{"nosuid", "noexec", "nodev", "rbind", "ro"}, - }, s.Mounts[len(s.Mounts)-1]) + oldLen := len(s.Mounts) + err := withRemovedMount("/run")(appcontext.Context(), nil, nil, &s) + assert.NoError(t, err) + assert.Equal(t, oldLen-1, len(s.Mounts)) } diff --git a/executor/oci/spec.go b/executor/oci/spec.go index 3ed128ad4377..35615d8dbece 100644 --- a/executor/oci/spec.go +++ b/executor/oci/spec.go @@ -72,12 +72,6 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou return nil, nil, err } - if rlimitsOpts, err := generateRlimitOpts(meta.Ulimit); err == nil { - opts = append(opts, rlimitsOpts...) - } else { - return nil, nil, err - } - hostname := defaultHostname if meta.Hostname != "" { hostname = meta.Hostname @@ -97,21 +91,22 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou oci.WithHostname(hostname), ) + if meta.ShmSize > 0 { + opts = append(opts, oci.WithDevShmSize(meta.ShmSize)) + } + s, err := oci.GenerateSpec(ctx, nil, c, opts...) 
if err != nil { return nil, nil, err } - if len(meta.Ulimit) == 0 { - // reset open files limit - s.Process.Rlimits = nil - } - // set the networking information on the spec if err := namespace.Set(s); err != nil { return nil, nil, err } + s.Process.Rlimits = nil // reset open files limit + sm := &submounts{} var releasers []func() error @@ -164,7 +159,6 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou }) } - s.Mounts = dedupMounts(s.Mounts) return s, releaseAll, nil } diff --git a/executor/oci/spec_unix.go b/executor/oci/spec_unix.go index 967a04c8147c..40ef8ed83771 100644 --- a/executor/oci/spec_unix.go +++ b/executor/oci/spec_unix.go @@ -4,8 +4,6 @@ package oci import ( "context" - "fmt" - "strings" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" @@ -80,29 +78,6 @@ func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) { }, nil } -func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) { - if len(ulimits) == 0 { - return nil, nil - } - var rlimits []specs.POSIXRlimit - for _, u := range ulimits { - if u == nil { - continue - } - rlimits = append(rlimits, specs.POSIXRlimit{ - Type: fmt.Sprintf("RLIMIT_%s", strings.ToUpper(u.Name)), - Hard: uint64(u.Hard), - Soft: uint64(u.Soft), - }) - } - return []oci.SpecOpts{ - func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { - s.Process.Rlimits = rlimits - return nil - }, - }, nil -} - // withDefaultProfile sets the default seccomp profile to the spec. 
// Note: must follow the setting of process capabilities func withDefaultProfile() oci.SpecOpts { diff --git a/executor/oci/spec_windows.go b/executor/oci/spec_windows.go index dc8193db46f2..ea3afe86a4fb 100644 --- a/executor/oci/spec_windows.go +++ b/executor/oci/spec_windows.go @@ -35,10 +35,3 @@ func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) { } return nil, errors.New("no support for IdentityMapping on Windows") } - -func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) { - if len(ulimits) == 0 { - return nil, nil - } - return nil, errors.New("no support for POSIXRlimit on Windows") -} diff --git a/frontend/dockerfile/builder/build.go b/frontend/dockerfile/builder/build.go index a46f59e4ecaf..7cc3802dc2c2 100644 --- a/frontend/dockerfile/builder/build.go +++ b/frontend/dockerfile/builder/build.go @@ -14,7 +14,6 @@ import ( "strings" "github.com/containerd/containerd/platforms" - "github.com/docker/go-units" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/exporter/containerimage/exptypes" @@ -56,7 +55,6 @@ const ( keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented keyShmSize = "shm-size" keyTargetPlatform = "platform" - keyUlimit = "ulimit" // Don't forget to update frontend documentation if you add // a new build-arg: frontend/dockerfile/docs/syntax.md @@ -124,11 +122,6 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Wrap(err, "failed to parse shm size") } - ulimit, err := parseUlimits(opts[keyUlimit]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse ulimit") - } - defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) if err != nil { return nil, err @@ -441,7 +434,6 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { PrefixPlatform: exportMap, ExtraHosts: extraHosts, ShmSize: shmSize, - Ulimit: ulimit, ForceNetMode: 
defaultNetMode, OverrideCopyImage: opts[keyOverrideCopyImage], LLBCaps: &caps, @@ -701,30 +693,6 @@ func parseShmSize(v string) (int64, error) { return kb, nil } -func parseUlimits(v string) ([]pb.Ulimit, error) { - if v == "" { - return nil, nil - } - out := make([]pb.Ulimit, 0) - csvReader := csv.NewReader(strings.NewReader(v)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - for _, field := range fields { - ulimit, err := units.ParseUlimit(field) - if err != nil { - return nil, err - } - out = append(out, pb.Ulimit{ - Name: ulimit.Name, - Soft: ulimit.Soft, - Hard: ulimit.Hard, - }) - } - return out, nil -} - func parseNetMode(v string) (pb.NetMode, error) { if v == "" { return llb.NetModeSandbox, nil diff --git a/frontend/dockerfile/cmd/dockerfile-frontend/hack/release b/frontend/dockerfile/cmd/dockerfile-frontend/hack/release index b384ee811532..100af88590fa 100755 --- a/frontend/dockerfile/cmd/dockerfile-frontend/hack/release +++ b/frontend/dockerfile/cmd/dockerfile-frontend/hack/release @@ -65,6 +65,17 @@ if [ "$PUSH" = "push" ]; then pushFlag="push=true" fi +importCacheFlags="" +exportCacheFlags="" +if [ "$GITHUB_ACTIONS" = "true" ]; then + if [ -n "$cacheRefFrom" ]; then + importCacheFlags="--cache-from=type=local,src=$cacheRefFrom" + fi + if [ -n "$cacheRefTo" ]; then + exportCacheFlags="--cache-to=type=local,dest=$cacheRefTo" + fi +fi + case $TYP in "master") tagf=./frontend/dockerfile/release/$TAG/tags @@ -79,7 +90,7 @@ case $TYP in pushTag=${pushTag}-$TAG fi - buildxCmd build $cacheFromFlags $cacheToFlags \ + buildxCmd build $importCacheFlags $exportCacheFlags \ --platform "$PLATFORMS" \ --build-arg "CHANNEL=$TAG" \ --build-arg "BUILDTAGS=$buildTags" \ @@ -97,7 +108,7 @@ case $TYP in fi buildTags=$(cat $tagf) - buildxCmd build $cacheFromFlags $cacheToFlags \ + buildxCmd build $importCacheFlags $exportCacheFlags \ --platform "$PLATFORMS" \ --build-arg "CHANNEL=$TAG" \ --build-arg "BUILDTAGS=$buildTags" \ @@ -124,7 +135,7 @@ 
case $TYP in tmp=$(mktemp -d -t buildid.XXXXXXXXXX) dt=$(date +%Y%m%d) - buildxCmd build $cacheFromFlags $cacheToFlags \ + buildxCmd build $importCacheFlags $exportCacheFlags \ --platform "$PLATFORMS" \ --target "buildid" \ --build-arg "CHANNEL=$TAG" \ @@ -139,7 +150,7 @@ case $TYP in buildid=$(cat $tmp/buildid) echo "buildid: $buildid" - buildxCmd build $cacheFromFlags $cacheToFlags \ + buildxCmd build $importCacheFlags $exportCacheFlags \ --platform "$PLATFORMS" \ --build-arg "CHANNEL=$TAG" \ --build-arg "BUILDTAGS=$buildTags" \ diff --git a/frontend/dockerfile/dockerfile2llb/convert.go b/frontend/dockerfile/dockerfile2llb/convert.go index a7d09a5c9c81..c5050c37a08d 100644 --- a/frontend/dockerfile/dockerfile2llb/convert.go +++ b/frontend/dockerfile/dockerfile2llb/convert.go @@ -60,7 +60,6 @@ type ConvertOpt struct { PrefixPlatform bool ExtraHosts []llb.HostIP ShmSize int64 - Ulimit []pb.Ulimit ForceNetMode pb.NetMode OverrideCopyImage string LLBCaps *apicaps.CapSet @@ -392,7 +391,6 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, targetPlatform: platformOpt.targetPlatform, extraHosts: opt.ExtraHosts, shmSize: opt.ShmSize, - ulimit: opt.Ulimit, copyImage: opt.OverrideCopyImage, llbCaps: opt.LLBCaps, sourceMap: opt.SourceMap, @@ -524,7 +522,6 @@ type dispatchOpt struct { buildPlatforms []ocispecs.Platform extraHosts []llb.HostIP shmSize int64 - ulimit []pb.Ulimit copyImage string llbCaps *apicaps.CapSet sourceMap *llb.SourceMap @@ -792,12 +789,6 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE opt = append(opt, networkOpt) } - if dopt.llbCaps != nil && dopt.llbCaps.Supports(pb.CapExecMetaUlimit) == nil { - for _, u := range dopt.ulimit { - opt = append(opt, llb.AddUlimit(llb.UlimitName(u.Name), u.Soft, u.Hard)) - } - } - shlex := *dopt.shlex shlex.RawQuotes = true shlex.SkipUnsetEnv = true @@ -810,13 +801,9 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE for 
_, h := range dopt.extraHosts { opt = append(opt, llb.AddExtraHost(h.Host, h.IP)) } - - if dopt.llbCaps != nil && dopt.llbCaps.Supports(pb.CapExecMountTmpfsSize) == nil { - if dopt.shmSize > 0 { - opt = append(opt, llb.AddMount("/dev/shm", llb.Scratch(), llb.Tmpfs(llb.TmpfsSize(dopt.shmSize)))) - } + if dopt.shmSize > 0 { + opt = append(opt, llb.WithShmSize(dopt.shmSize)) } - d.state = d.state.Run(opt...).Root() return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state) } diff --git a/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/frontend/dockerfile/dockerfile2llb/convert_runmount.go index 7777fba91ac9..87589860fe09 100644 --- a/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ b/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -95,9 +95,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* var mountOpts []llb.MountOption if mount.Type == instructions.MountTypeTmpfs { st = llb.Scratch() - mountOpts = append(mountOpts, llb.Tmpfs( - llb.TmpfsSize(mount.SizeLimit), - )) + mountOpts = append(mountOpts, llb.Tmpfs()) } if mount.Type == instructions.MountTypeSecret { secret, err := dispatchSecret(mount) diff --git a/frontend/dockerfile/dockerfile_mount_test.go b/frontend/dockerfile/dockerfile_mount_test.go index 93666f9ad2f4..a2255f498d29 100644 --- a/frontend/dockerfile/dockerfile_mount_test.go +++ b/frontend/dockerfile/dockerfile_mount_test.go @@ -25,7 +25,6 @@ var mountTests = []integration.Test{ testMountMetaArg, testMountFromError, testMountInvalid, - testMountTmpfsSize, } func init() { @@ -453,46 +452,3 @@ RUN --mount=from=$ttt,type=cache,target=/tmp ls require.Error(t, err) require.Contains(t, err.Error(), "'from' doesn't support variable expansion, define alias stage instead") } - -func testMountTmpfsSize(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - - dockerfile := []byte(` -FROM busybox AS base -RUN 
--mount=type=tmpfs,target=/dev/shm,size=128m mount | grep /dev/shm > /tmpfssize -FROM scratch -COPY --from=base /tmpfssize / -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - Exports: []client.ExportEntry{ - { - Type: client.ExporterLocal, - OutputDir: destDir, - }, - }, - LocalDirs: map[string]string{ - builder.DefaultLocalNameDockerfile: dir, - builder.DefaultLocalNameContext: dir, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "tmpfssize")) - require.NoError(t, err) - require.Contains(t, string(dt), `size=131072k`) -} diff --git a/frontend/dockerfile/dockerfile_runsecurity_test.go b/frontend/dockerfile/dockerfile_runsecurity_test.go index 358db6733554..2cf106871e77 100644 --- a/frontend/dockerfile/dockerfile_runsecurity_test.go +++ b/frontend/dockerfile/dockerfile_runsecurity_test.go @@ -1,4 +1,3 @@ -//go:build dfrunsecurity // +build dfrunsecurity package dockerfile @@ -94,7 +93,7 @@ func testRunSecurityInsecure(t *testing.T, sb integration.Sandbox) { dockerfile := []byte(` FROM busybox -RUN --security=insecure [ "$(printf '%x' $(( $(cat /proc/self/status | grep CapBnd | cut -f 2 | sed s#^#0x#) & 0x3fffffffff)))" == "3fffffffff" ] +RUN --security=insecure [ "$(cat /proc/self/status | grep CapBnd)" == "CapBnd: 0000003fffffffff" ] RUN [ "$(cat /proc/self/status | grep CapBnd)" == "CapBnd: 00000000a80425fb" ] `) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index a8f06fd60163..b000543aa6b3 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -115,8 +115,6 @@ var allTests = 
[]integration.Test{ testWildcardRenameCache, testDockerfileInvalidInstruction, testBuildInfo, - testShmSize, - testUlimit, } var fileOpTests = []integration.Test{ @@ -5266,96 +5264,6 @@ COPY --from=buildx /buildx /usr/libexec/docker/cli-plugins/docker-buildx assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", bi["sources"][3].Pin) } -func testShmSize(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - dockerfile := []byte(` -FROM busybox AS base -RUN mount | grep /dev/shm > /shmsize -FROM scratch -COPY --from=base /shmsize / -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "shm-size": "134217728", - }, - LocalDirs: map[string]string{ - builder.DefaultLocalNameDockerfile: dir, - builder.DefaultLocalNameContext: dir, - }, - Exports: []client.ExportEntry{ - { - Type: client.ExporterLocal, - OutputDir: destDir, - }, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "shmsize")) - require.NoError(t, err) - require.Contains(t, string(dt), `size=131072k`) -} - -func testUlimit(t *testing.T, sb integration.Sandbox) { - f := getFrontend(t, sb) - dockerfile := []byte(` -FROM busybox AS base -RUN ulimit -n > /ulimit -FROM scratch -COPY --from=base /ulimit / -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - - c, err := client.New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, 
err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "ulimit": "nofile=1062:1062", - }, - LocalDirs: map[string]string{ - builder.DefaultLocalNameDockerfile: dir, - builder.DefaultLocalNameContext: dir, - }, - Exports: []client.ExportEntry{ - { - Type: client.ExporterLocal, - OutputDir: destDir, - }, - }, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "ulimit")) - require.NoError(t, err) - require.Equal(t, `1062`, strings.TrimSpace(string(dt))) -} - func tmpdir(appliers ...fstest.Applier) (string, error) { tmpdir, err := ioutil.TempDir("", "buildkit-dockerfile") if err != nil { diff --git a/frontend/dockerfile/docs/syntax.md b/frontend/dockerfile/docs/syntax.md index 04bce887af39..64617f7ac781 100644 --- a/frontend/dockerfile/docs/syntax.md +++ b/frontend/dockerfile/docs/syntax.md @@ -96,7 +96,6 @@ This mount type allows mounting tmpfs in the build container. |Option |Description| |---------------------|-----------| |`target` (required) | Mount path.| -|`size` | Specify an upper limit on the size of the filesystem.| ### `RUN --mount=type=secret` diff --git a/frontend/dockerfile/instructions/commands_runmount.go b/frontend/dockerfile/instructions/commands_runmount.go index 517ded7d6788..f32b72489deb 100644 --- a/frontend/dockerfile/instructions/commands_runmount.go +++ b/frontend/dockerfile/instructions/commands_runmount.go @@ -6,7 +6,6 @@ import ( "strconv" "strings" - dockeropts "github.com/docker/docker/opts" "github.com/moby/buildkit/util/suggest" "github.com/pkg/errors" ) @@ -124,7 +123,6 @@ type Mount struct { Source string Target string ReadOnly bool - SizeLimit int64 CacheID string CacheSharing string Required bool @@ -229,16 +227,6 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } - case "size": - if m.Type == "tmpfs" { - tmpfsSize := new(dockeropts.MemBytes) - 
if err := tmpfsSize.Set(value); err != nil { - return nil, errors.Errorf("invalid value for %s: %s", key, value) - } - m.SizeLimit = tmpfsSize.Value() - } else { - return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) - } case "id": m.CacheID = value case "sharing": diff --git a/frontend/gateway/container.go b/frontend/gateway/container.go index 1d4bfc23f803..c8d07d9e1945 100644 --- a/frontend/gateway/container.go +++ b/frontend/gateway/container.go @@ -158,7 +158,7 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage } switch m.MountType { - case opspb.MountType_HOST_BIND: + case opspb.MountType_HOST_BIND: //earthly mountable = mm.MountableHostBind() case opspb.MountType_BIND: @@ -214,7 +214,7 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage } case opspb.MountType_TMPFS: - mountable = mm.MountableTmpFS(m) + mountable = mm.MountableTmpFS() case opspb.MountType_SECRET: var err error mountable, err = mm.MountableSecret(ctx, m, g) diff --git a/go.mod b/go.mod index 15ca619ed9c5..abdff983de15 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,6 @@ require ( // docker: the actual version is replaced in replace() github.com/docker/docker v20.10.7+incompatible // master (v21.xx-dev) github.com/docker/go-connections v0.4.0 - github.com/docker/go-units v0.4.0 github.com/gofrs/flock v0.7.3 github.com/gogo/googleapis v1.4.0 github.com/gogo/protobuf v1.3.2 @@ -31,13 +30,10 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/golang-lru v0.5.3 - github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect github.com/klauspost/compress v1.13.5 github.com/mitchellh/hashstructure v1.0.0 github.com/moby/locker v1.0.1 - github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/signal v0.5.0 - github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/morikuni/aec v1.0.0 
github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 @@ -51,7 +47,7 @@ require ( github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.7.0 github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf - github.com/tonistiigi/go-actions-cache v0.0.0-20211002214948-4d48f2ff622a + github.com/tonistiigi/go-actions-cache v0.0.0-20210714033416-b93d7f1b2e70 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f github.com/urfave/cli v1.22.4 @@ -89,10 +85,10 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect - github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/docker-credential-helpers v0.6.4 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect + github.com/docker/go-units v0.4.0 // indirect github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 // indirect @@ -104,8 +100,11 @@ require ( github.com/hanwen/go-fuse/v2 v2.1.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/mountinfo v0.4.1 // indirect + github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.11.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect diff --git a/go.sum b/go.sum index 4b98f7ea922c..35c687cfb425 
100644 --- a/go.sum +++ b/go.sum @@ -328,8 +328,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= -github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -1039,8 +1037,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= -github.com/tonistiigi/go-actions-cache v0.0.0-20211002214948-4d48f2ff622a h1:TkwT/jFyObWQRFSUdLPEUIBXXlbqkGzStfOFgu/okCE= -github.com/tonistiigi/go-actions-cache v0.0.0-20211002214948-4d48f2ff622a/go.mod h1:YiIBjH5gP7mao3t0dBrNNBGuKYdeJmcAJjYLXr43k6A= +github.com/tonistiigi/go-actions-cache v0.0.0-20210714033416-b93d7f1b2e70 h1:+ZlFs3Tl5qYZJvX2PxfZxGlVXz847LsOJGyNVU5pCHo= +github.com/tonistiigi/go-actions-cache v0.0.0-20210714033416-b93d7f1b2e70/go.mod h1:dNS+PPTqGnSl80x3wEyWWCHeON5xiBGtcM0uD6CgHNU= github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/google.golang.org/grpc/otelgrpc 
v0.0.0-20210714055410-d010b05b4939 h1:s6wDMZYNyWt8KvkjhrMpOthFPgI3JB8ipJS+eCV/psg= github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.0.0-20210714055410-d010b05b4939/go.mod h1:Vm5u/mtkj1OMhtao0v+BGo2LUoLCgHYXvRmj0jWITlE= github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/httptrace/otelhttptrace v0.0.0-20210714055410-d010b05b4939 h1:ZZ1KHKvs97BcRoblbm6RhrDzs/OejFv7miYSIcZI7Ds= diff --git a/hack/build_ci_first_pass b/hack/build_ci_first_pass index 1d05c7e44812..379f975c576d 100755 --- a/hack/build_ci_first_pass +++ b/hack/build_ci_first_pass @@ -14,14 +14,25 @@ if [ -z "$TYP" ]; then usage fi +importCacheFlags="" +exportCacheFlags="" +if [ "$GITHUB_ACTIONS" = "true" ]; then + if [ -n "$cacheRefFrom" ]; then + importCacheFlags="--cache-from=type=local,src=$cacheRefFrom" + fi + if [ -n "$cacheRefTo" ]; then + exportCacheFlags="--cache-to=type=local,dest=$cacheRefTo" + fi +fi + case $TYP in "binaries") - buildxCmd build $cacheFromFlags $cacheToFlags \ + buildxCmd build $importCacheFlags $exportCacheFlags \ --target "binaries" \ $currentcontext ;; "integration-tests") - buildxCmd build $cacheFromFlags $cacheToFlags \ + buildxCmd build $importCacheFlags $exportCacheFlags \ --target "integration-tests-base" \ $currentcontext ;; diff --git a/hack/cross b/hack/cross index 378ca0e8dd15..14da7ef6d254 100755 --- a/hack/cross +++ b/hack/cross @@ -5,13 +5,24 @@ set -e : ${PLATFORMS=linux/arm} +importCacheFlags="" +exportCacheFlags="" +if [ "$GITHUB_ACTIONS" = "true" ]; then + if [ -n "$cacheRefFrom" ]; then + importCacheFlags="--cache-from=type=local,src=$cacheRefFrom" + fi + if [ -n "$cacheRefTo" ]; then + exportCacheFlags="--cache-to=type=local,dest=$cacheRefTo" + fi +fi + if [ -n "$RUNC_PLATFORMS" ]; then - buildxCmd build $cacheFromFlags $cacheToFlags \ + buildxCmd build $importCacheFlags $exportCacheFlags \ --target "binaries-linux-helper" \ --platform "$RUNC_PLATFORMS" \ $currentcontext fi -buildxCmd build 
$cacheFromFlags \ +buildxCmd build $importCacheFlags \ --platform "$PLATFORMS" \ $currentcontext diff --git a/hack/images b/hack/images index 6bd655416468..86d8cf269d50 100755 --- a/hack/images +++ b/hack/images @@ -64,16 +64,18 @@ importCacheFlags="" for tagName in $tagNames; do importCacheFlags="$importCacheFlags--cache-from=type=registry,ref=$tagName " done -if [ -n "$cacheFromFlags" ]; then - importCacheFlags="$importCacheFlags$cacheFromFlags" +if [[ -n "$cacheRefFrom" ]] && [[ "$cacheType" = "local" ]]; then + for ref in $cacheRefFrom; do + importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref " + done fi if [ -n "$localmode" ]; then importCacheFlags="" fi exportCacheFlags="" -if [ -n "$cacheToFlags" ]; then - exportCacheFlags="$cacheToFlags" +if [[ -n "$cacheRefTo" ]] && [[ "$cacheType" = "local" ]]; then + exportCacheFlags="--cache-to=type=local,dest=$cacheRefTo " elif [ "$PUSH" = "push" ]; then exportCacheFlags="$exportCacheFlags--cache-to=type=inline " fi diff --git a/hack/release-tar b/hack/release-tar index 3995f1fee052..ee48c0688441 100755 --- a/hack/release-tar +++ b/hack/release-tar @@ -17,7 +17,14 @@ if [ -z "$TAG" ] || [ -z "$OUT" ]; then usage fi -buildxCmd build $cacheFromFlags \ +importCacheFlags="" +if [[ -n "$cacheRefFrom" ]] && [[ "$cacheType" = "local" ]]; then + for ref in $cacheRefFrom; do + importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref " + done +fi + +buildxCmd build $importCacheFlags \ --target release \ --platform "$PLATFORMS" \ --output "type=local,dest=$OUT" \ diff --git a/hack/test b/hack/test index 91e159d6ca69..60ce94540447 100755 --- a/hack/test +++ b/hack/test @@ -18,6 +18,17 @@ if [ "$TEST_DOCKERD" == "1" ] && ! 
file $TEST_DOCKERD_BINARY | grep "statically exit 1 fi +importCacheFlags="" +if [ -n "$cacheRefFrom" ]; then + if [ "$cacheType" = "local" ]; then + for ref in $cacheRefFrom; do + importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref " + done + else + importCacheFlags="--cache-from=type=registry,ref=$cacheRefFrom:integration-tests " + fi +fi + if [ "$#" == 0 ]; then TEST_INTEGRATION=1; fi while test $# -gt 0; do @@ -50,7 +61,7 @@ if [ "$TEST_COVERAGE" = "1" ]; then coverageFlags="-coverprofile=/coverage/coverage.txt -covermode=atomic" fi -buildxCmd build $cacheFromFlags \ +buildxCmd build $importCacheFlags \ --target "integration-tests" \ --output "type=docker,name=$iid" \ $currentcontext @@ -93,7 +104,7 @@ if [ "$TEST_DOCKERFILE" == 1 ]; then buildtags=$(cat ./frontend/dockerfile/release/$release/tags) tarout=$(mktemp -t dockerfile-frontend.XXXXXXXXXX) - buildxCmd build $cacheFromFlags \ + buildxCmd build $importCacheFlags \ --build-arg "BUILDTAGS=$buildtags" \ --file "./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile" \ --output "type=oci,dest=$tarout" \ diff --git a/hack/util b/hack/util index b67174d61d7e..60e85264d92b 100755 --- a/hack/util +++ b/hack/util @@ -5,8 +5,8 @@ export BUILDX_NO_DEFAULT_LOAD=true : ${PREFER_LEGACY=} : ${CI=} : ${GITHUB_ACTIONS=} -: ${CACHE_FROM=} -: ${CACHE_TO=} +: ${CACHEDIR_FROM=} +: ${CACHEDIR_TO=} if [ "$PREFER_BUILDCTL" = "1" ]; then echo >&2 "WARNING: PREFER_BUILDCTL is no longer supported. Ignoring." @@ -41,23 +41,18 @@ buildxCmd() { fi } +cacheType="" +cacheRefFrom="" +cacheRefTo="" currentref="" -currentcontext="." 
-cacheFromFlags="" -cacheToFlags="" if [ "$GITHUB_ACTIONS" = "true" ]; then currentref="git://github.com/$GITHUB_REPOSITORY#$GITHUB_REF" - if [ -n "$CACHE_FROM" ]; then - for cfrom in $CACHE_FROM; do - cacheFromFlags="${cacheFromFlags}--cache-from=$cfrom " - done - fi - if [ -n "$CACHE_TO" ]; then - for cto in $CACHE_TO; do - cacheToFlags="${cacheToFlags}--cache-to=$cto " - done - fi + cacheType="local" + cacheRefFrom="$CACHEDIR_FROM" + cacheRefTo="$CACHEDIR_TO" fi + +currentcontext="." if [ -n "$currentref" ]; then currentcontext="--build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 $currentref" fi diff --git a/session/filesync/diffcopy.go b/session/filesync/diffcopy.go index a30b64afbadf..bcf8f45bdca3 100644 --- a/session/filesync/diffcopy.go +++ b/session/filesync/diffcopy.go @@ -22,7 +22,7 @@ type Stream interface { } func sendDiffCopy(stream Stream, fs fsutil.FS, progress progressCb) error { - return errors.WithStack(fsutil.Send(stream.Context(), stream, fs, progress, nil)) + return errors.WithStack(fsutil.Send(stream.Context(), stream, fs, progress, nil)) //earthly needs to pass nil } func newStreamWriter(stream grpc.ClientStream) io.WriteCloser { diff --git a/session/filesync/filesync.go b/session/filesync/filesync.go index 90b21e8b240b..c77bb0faaf11 100644 --- a/session/filesync/filesync.go +++ b/session/filesync/filesync.go @@ -239,7 +239,7 @@ func NewFSSyncTarget(f func(map[string]string) (io.WriteCloser, error)) session. 
return p } -// NewFSSyncTarget allows writing into an io.WriteCloser - THIS IS EARTHLY SPECIFIC +// NewFSSyncTarget allows writing into an io.WriteCloser; it is earthly-specific func NewFSSyncMultiTarget(f func(map[string]string) (io.WriteCloser, error), outdirFunc func(map[string]string) (string, error)) session.Attachable { p := &fsSyncTarget{ f: f, @@ -250,7 +250,7 @@ func NewFSSyncMultiTarget(f func(map[string]string) (io.WriteCloser, error), out type fsSyncTarget struct { outdir string - outdirFunc func(map[string]string) (string, error) // earthly-specific + outdirFunc func(map[string]string) (string, error) //earthly f func(map[string]string) (io.WriteCloser, error) } @@ -314,7 +314,7 @@ func CopyToCaller(ctx context.Context, fs fsutil.FS, c session.Caller, progress return sendDiffCopy(cc, fs, progress) } -// CopyToCallerWithMeta is earthly-specific +// CopyToCallerWithMeta is earthly specific func CopyToCallerWithMeta(ctx context.Context, md map[string]string, fs fsutil.FS, c session.Caller, progress func(int, bool)) error { method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy") if !c.Supports(method) { diff --git a/solver/exporter.go b/solver/exporter.go index 3f983327429a..26ca2fb9291a 100644 --- a/solver/exporter.go +++ b/solver/exporter.go @@ -11,6 +11,7 @@ type exporter struct { records []*CacheRecord record *CacheRecord + res []CacheExporterRecord edge *edge // for secondaryExporters override *bool } @@ -51,10 +52,9 @@ func addBacklinks(t CacheExporterTarget, rec CacheExporterRecord, cm *cacheManag return rec, nil } -type contextT string +type backlinkT struct{} -var backlinkKey = contextT("solver/exporter/backlinks") -var resKey = contextT("solver/exporter/res") +var backlinkKey = backlinkT{} func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) ([]CacheExporterRecord, error) { var bkm map[string]CacheExporterRecord @@ -66,16 +66,8 @@ func (e *exporter) ExportTo(ctx context.Context, t 
CacheExporterTarget, opt Cach bkm = bk.(map[string]CacheExporterRecord) } - var res map[*exporter][]CacheExporterRecord - if r := ctx.Value(resKey); r == nil { - res = map[*exporter][]CacheExporterRecord{} - ctx = context.WithValue(ctx, resKey, res) - } else { - res = r.(map[*exporter][]CacheExporterRecord) - } - if t.Visited(e) { - return res[e], nil + return e.res, nil } t.Visit(e) @@ -188,9 +180,9 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach } } - res[e] = allRec + e.res = allRec - return allRec, nil + return e.res, nil } func getBestResult(records []*CacheRecord) *CacheRecord { diff --git a/solver/jobs.go b/solver/jobs.go index a67b2b272454..4d125cbdfa27 100644 --- a/solver/jobs.go +++ b/solver/jobs.go @@ -946,8 +946,6 @@ func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bo v.Cached = cached if err != nil { v.Error = err.Error() - } else { - v.Error = "" } pw.Write(v.Digest.String(), *v) } diff --git a/solver/llbsolver/mounts/mount.go b/solver/llbsolver/mounts/mount.go index 3b1011a9d187..da35681a77f3 100644 --- a/solver/llbsolver/mounts/mount.go +++ b/solver/llbsolver/mounts/mount.go @@ -369,11 +369,11 @@ func (mm *MountManager) MountableCache(ctx context.Context, m *pb.Mount, ref cac return mm.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing, g) } -func (mm *MountManager) MountableTmpFS(m *pb.Mount) cache.Mountable { - return newTmpfs(mm.cm.IdentityMapping(), m.TmpfsOpt) +func (mm *MountManager) MountableTmpFS() cache.Mountable { + return newTmpfs(mm.cm.IdentityMapping()) } -func (mm *MountManager) MountableHostBind() cache.Mountable { +func (mm *MountManager) MountableHostBind() cache.Mountable { // earthly-specific return newHostBind(mm.cm.IdentityMapping()) } @@ -385,23 +385,21 @@ func (mm *MountManager) MountableSSH(ctx context.Context, m *pb.Mount, g session return mm.getSSHMountable(ctx, m, g) } -func newTmpfs(idmap *idtools.IdentityMapping, opt *pb.TmpfsOpt) cache.Mountable 
{ - return &tmpfs{idmap: idmap, opt: opt} +func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable { + return &tmpfs{idmap: idmap} } type tmpfs struct { idmap *idtools.IdentityMapping - opt *pb.TmpfsOpt } func (f *tmpfs) Mount(ctx context.Context, readonly bool, g session.Group) (snapshot.Mountable, error) { - return &tmpfsMount{readonly: readonly, idmap: f.idmap, opt: f.opt}, nil + return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil } type tmpfsMount struct { readonly bool idmap *idtools.IdentityMapping - opt *pb.TmpfsOpt } func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) { @@ -409,11 +407,6 @@ func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) { if m.readonly { opt = append(opt, "ro") } - if m.opt != nil { - if m.opt.Size_ > 0 { - opt = append(opt, fmt.Sprintf("size=%d", m.opt.Size_)) - } - } return []mount.Mount{{ Type: "tmpfs", Source: "tmpfs", @@ -425,7 +418,7 @@ func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping { return m.idmap } -// earthly-specific feature +// earthly-specific hostbind functions func newHostBind(idmap *idtools.IdentityMapping) cache.Mountable { return &hostBind{idmap: idmap} } diff --git a/solver/llbsolver/ops/exec.go b/solver/llbsolver/ops/exec.go index f58544471d8d..51a9e12349b7 100644 --- a/solver/llbsolver/ops/exec.go +++ b/solver/llbsolver/ops/exec.go @@ -45,7 +45,7 @@ type execOp struct { platform *pb.Platform numInputs int parallelism *semaphore.Weighted - sm *session.Manager + sm *session.Manager //earthly } func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, exec executor.Executor, w worker.Worker) (solver.Op, error) { @@ -62,7 +62,7 @@ func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache. 
w: w, platform: platform, parallelism: parallelism, - sm: sm, + sm: sm, //earthly }, nil } @@ -321,7 +321,7 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu Hostname: e.op.Meta.Hostname, ReadonlyRootFS: p.ReadonlyRootFS, ExtraHosts: extraHosts, - Ulimit: e.op.Meta.Ulimit, + ShmSize: e.op.Meta.ShmSize, NetMode: e.op.Network, SecurityMode: e.op.Security, } @@ -370,6 +370,7 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu return results, errors.Wrapf(execErr, "process %q did not complete successfully", strings.Join(e.op.Meta.Args, " ")) } +// earthly-specific func (e *execOp) doFromLocalHack(ctx context.Context, root executor.Mount, mounts []executor.Mount, g session.Group, meta executor.Meta, stdout, stderr io.WriteCloser) (bool, error) { var cmd string if len(meta.Args) > 0 { diff --git a/solver/pb/caps.go b/solver/pb/caps.go index 8e6c7c61bb5e..0f7aaeee60b0 100644 --- a/solver/pb/caps.go +++ b/solver/pb/caps.go @@ -35,23 +35,22 @@ const ( CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename" - CapExecMetaBase apicaps.CapID = "exec.meta.base" - CapExecMetaNetwork apicaps.CapID = "exec.meta.network" - CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv" - CapExecMetaSecurity apicaps.CapID = "exec.meta.security" + CapExecMetaBase apicaps.CapID = "exec.meta.base" + CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv" + CapExecMetaNetwork apicaps.CapID = "exec.meta.network" + CapExecMetaSecurity apicaps.CapID = "exec.meta.security" + CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" + CapExecMountBind apicaps.CapID = "exec.mount.bind" + CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" + CapExecMountCache apicaps.CapID = "exec.mount.cache" + CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" + CapExecMountSelector apicaps.CapID = "exec.mount.selector" + CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs" + 
CapExecMountSecret apicaps.CapID = "exec.mount.secret" + CapExecMountSSH apicaps.CapID = "exec.mount.ssh" + CapExecCgroupsMounted apicaps.CapID = "exec.cgroup" + CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1" - CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" - CapExecMetaUlimit apicaps.CapID = "exec.meta.ulimit" - CapExecMountBind apicaps.CapID = "exec.mount.bind" - CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" - CapExecMountCache apicaps.CapID = "exec.mount.cache" - CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" - CapExecMountSelector apicaps.CapID = "exec.mount.selector" - CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs" - CapExecMountTmpfsSize apicaps.CapID = "exec.mount.tmpfs.size" - CapExecMountSecret apicaps.CapID = "exec.mount.secret" - CapExecMountSSH apicaps.CapID = "exec.mount.ssh" - CapExecCgroupsMounted apicaps.CapID = "exec.cgroup" CapFileBase apicaps.CapID = "file.base" CapFileRmWildcard apicaps.CapID = "file.rm.wildcard" @@ -237,12 +236,6 @@ func init() { Status: apicaps.CapStatusExperimental, }) - Caps.Init(apicaps.Cap{ - ID: CapExecMetaUlimit, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - Caps.Init(apicaps.Cap{ ID: CapExecMountBind, Enabled: true, @@ -279,12 +272,6 @@ func init() { Status: apicaps.CapStatusExperimental, }) - Caps.Init(apicaps.Cap{ - ID: CapExecMountTmpfsSize, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - Caps.Init(apicaps.Cap{ ID: CapExecMountSecret, Enabled: true, diff --git a/solver/pb/ops.pb.go b/solver/pb/ops.pb.go index 56874bc54cc4..a49857b98fbb 100644 --- a/solver/pb/ops.pb.go +++ b/solver/pb/ops.pb.go @@ -471,7 +471,7 @@ type Meta struct { ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` Hostname string 
`protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` - Ulimit []*Ulimit `protobuf:"bytes,9,rep,name=ulimit,proto3" json:"ulimit,omitempty"` + ShmSize int64 `protobuf:"varint,8,opt,name=shmSize,proto3" json:"shmSize,omitempty"` } func (m *Meta) Reset() { *m = Meta{} } @@ -552,113 +552,9 @@ func (m *Meta) GetHostname() string { return "" } -func (m *Meta) GetUlimit() []*Ulimit { +func (m *Meta) GetShmSize() int64 { if m != nil { - return m.Ulimit - } - return nil -} - -type HostIP struct { - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` -} - -func (m *HostIP) Reset() { *m = HostIP{} } -func (m *HostIP) String() string { return proto.CompactTextString(m) } -func (*HostIP) ProtoMessage() {} -func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{5} -} -func (m *HostIP) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostIP) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostIP.Merge(m, src) -} -func (m *HostIP) XXX_Size() int { - return m.Size() -} -func (m *HostIP) XXX_DiscardUnknown() { - xxx_messageInfo_HostIP.DiscardUnknown(m) -} - -var xxx_messageInfo_HostIP proto.InternalMessageInfo - -func (m *HostIP) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *HostIP) GetIP() string { - if m != nil { - return m.IP - } - return "" -} - -type Ulimit struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Soft int64 `protobuf:"varint,2,opt,name=Soft,proto3" json:"Soft,omitempty"` - Hard int64 `protobuf:"varint,3,opt,name=Hard,proto3" json:"Hard,omitempty"` -} - -func (m *Ulimit) Reset() { *m = Ulimit{} } -func (m *Ulimit) String() string { 
return proto.CompactTextString(m) } -func (*Ulimit) ProtoMessage() {} -func (*Ulimit) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{6} -} -func (m *Ulimit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Ulimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Ulimit) XXX_Merge(src proto.Message) { - xxx_messageInfo_Ulimit.Merge(m, src) -} -func (m *Ulimit) XXX_Size() int { - return m.Size() -} -func (m *Ulimit) XXX_DiscardUnknown() { - xxx_messageInfo_Ulimit.DiscardUnknown(m) -} - -var xxx_messageInfo_Ulimit proto.InternalMessageInfo - -func (m *Ulimit) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Ulimit) GetSoft() int64 { - if m != nil { - return m.Soft - } - return 0 -} - -func (m *Ulimit) GetHard() int64 { - if m != nil { - return m.Hard + return m.ShmSize } return 0 } @@ -671,7 +567,6 @@ type Mount struct { Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"` Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` - TmpfsOpt *TmpfsOpt `protobuf:"bytes,19,opt,name=TmpfsOpt,proto3" json:"TmpfsOpt,omitempty"` CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"` SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"` SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"` @@ -682,7 +577,7 @@ func (m *Mount) Reset() { *m = Mount{} } func (m *Mount) String() string { return proto.CompactTextString(m) } func (*Mount) ProtoMessage() {} func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{7} + return 
fileDescriptor_8de16154b2733812, []int{5} } func (m *Mount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -735,13 +630,6 @@ func (m *Mount) GetMountType() MountType { return MountType_BIND } -func (m *Mount) GetTmpfsOpt() *TmpfsOpt { - if m != nil { - return m.TmpfsOpt - } - return nil -} - func (m *Mount) GetCacheOpt() *CacheOpt { if m != nil { return m.CacheOpt @@ -770,48 +658,6 @@ func (m *Mount) GetResultID() string { return "" } -// TmpfsOpt defines options describing tpmfs mounts -type TmpfsOpt struct { - // Specify an upper limit on the size of the filesystem. - Size_ int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` -} - -func (m *TmpfsOpt) Reset() { *m = TmpfsOpt{} } -func (m *TmpfsOpt) String() string { return proto.CompactTextString(m) } -func (*TmpfsOpt) ProtoMessage() {} -func (*TmpfsOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{8} -} -func (m *TmpfsOpt) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TmpfsOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TmpfsOpt) XXX_Merge(src proto.Message) { - xxx_messageInfo_TmpfsOpt.Merge(m, src) -} -func (m *TmpfsOpt) XXX_Size() int { - return m.Size() -} -func (m *TmpfsOpt) XXX_DiscardUnknown() { - xxx_messageInfo_TmpfsOpt.DiscardUnknown(m) -} - -var xxx_messageInfo_TmpfsOpt proto.InternalMessageInfo - -func (m *TmpfsOpt) GetSize_() int64 { - if m != nil { - return m.Size_ - } - return 0 -} - // CacheOpt defines options specific to cache mounts type CacheOpt struct { // ID is an optional namespace for the mount @@ -824,7 +670,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} } func (m *CacheOpt) String() string { return proto.CompactTextString(m) } func (*CacheOpt) ProtoMessage() {} func (*CacheOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{9} + 
return fileDescriptor_8de16154b2733812, []int{6} } func (m *CacheOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -882,7 +728,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} } func (m *SecretOpt) String() string { return proto.CompactTextString(m) } func (*SecretOpt) ProtoMessage() {} func (*SecretOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{10} + return fileDescriptor_8de16154b2733812, []int{7} } func (m *SecretOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -961,7 +807,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} } func (m *SSHOpt) String() string { return proto.CompactTextString(m) } func (*SSHOpt) ProtoMessage() {} func (*SSHOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{11} + return fileDescriptor_8de16154b2733812, []int{8} } func (m *SSHOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1034,7 +880,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} } func (m *SourceOp) String() string { return proto.CompactTextString(m) } func (*SourceOp) ProtoMessage() {} func (*SourceOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{12} + return fileDescriptor_8de16154b2733812, []int{9} } func (m *SourceOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1086,7 +932,7 @@ func (m *BuildOp) Reset() { *m = BuildOp{} } func (m *BuildOp) String() string { return proto.CompactTextString(m) } func (*BuildOp) ProtoMessage() {} func (*BuildOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{13} + return fileDescriptor_8de16154b2733812, []int{10} } func (m *BuildOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1141,7 +987,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} } func (m *BuildInput) String() string { return proto.CompactTextString(m) } func (*BuildInput) ProtoMessage() {} func (*BuildInput) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, 
[]int{14} + return fileDescriptor_8de16154b2733812, []int{11} } func (m *BuildInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1182,7 +1028,7 @@ func (m *OpMetadata) Reset() { *m = OpMetadata{} } func (m *OpMetadata) String() string { return proto.CompactTextString(m) } func (*OpMetadata) ProtoMessage() {} func (*OpMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{15} + return fileDescriptor_8de16154b2733812, []int{12} } func (m *OpMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1245,7 +1091,7 @@ func (m *Source) Reset() { *m = Source{} } func (m *Source) String() string { return proto.CompactTextString(m) } func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{16} + return fileDescriptor_8de16154b2733812, []int{13} } func (m *Source) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1293,7 +1139,7 @@ func (m *Locations) Reset() { *m = Locations{} } func (m *Locations) String() string { return proto.CompactTextString(m) } func (*Locations) ProtoMessage() {} func (*Locations) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{17} + return fileDescriptor_8de16154b2733812, []int{14} } func (m *Locations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1336,7 +1182,7 @@ func (m *SourceInfo) Reset() { *m = SourceInfo{} } func (m *SourceInfo) String() string { return proto.CompactTextString(m) } func (*SourceInfo) ProtoMessage() {} func (*SourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{18} + return fileDescriptor_8de16154b2733812, []int{15} } func (m *SourceInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1392,7 +1238,7 @@ func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} func (*Location) Descriptor() ([]byte, []int) { - 
return fileDescriptor_8de16154b2733812, []int{19} + return fileDescriptor_8de16154b2733812, []int{16} } func (m *Location) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1441,7 +1287,7 @@ func (m *Range) Reset() { *m = Range{} } func (m *Range) String() string { return proto.CompactTextString(m) } func (*Range) ProtoMessage() {} func (*Range) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{20} + return fileDescriptor_8de16154b2733812, []int{17} } func (m *Range) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1490,7 +1336,7 @@ func (m *Position) Reset() { *m = Position{} } func (m *Position) String() string { return proto.CompactTextString(m) } func (*Position) ProtoMessage() {} func (*Position) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{21} + return fileDescriptor_8de16154b2733812, []int{18} } func (m *Position) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1537,7 +1383,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} } func (m *ExportCache) String() string { return proto.CompactTextString(m) } func (*ExportCache) ProtoMessage() {} func (*ExportCache) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{22} + return fileDescriptor_8de16154b2733812, []int{19} } func (m *ExportCache) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1581,7 +1427,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } func (*ProxyEnv) ProtoMessage() {} func (*ProxyEnv) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{23} + return fileDescriptor_8de16154b2733812, []int{20} } func (m *ProxyEnv) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1650,7 +1496,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } func (*WorkerConstraints) 
ProtoMessage() {} func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{24} + return fileDescriptor_8de16154b2733812, []int{21} } func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1697,7 +1543,7 @@ func (m *Definition) Reset() { *m = Definition{} } func (m *Definition) String() string { return proto.CompactTextString(m) } func (*Definition) ProtoMessage() {} func (*Definition) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{25} + return fileDescriptor_8de16154b2733812, []int{22} } func (m *Definition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1743,6 +1589,54 @@ func (m *Definition) GetSource() *Source { return nil } +type HostIP struct { + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` +} + +func (m *HostIP) Reset() { *m = HostIP{} } +func (m *HostIP) String() string { return proto.CompactTextString(m) } +func (*HostIP) ProtoMessage() {} +func (*HostIP) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{23} +} +func (m *HostIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostIP.Merge(m, src) +} +func (m *HostIP) XXX_Size() int { + return m.Size() +} +func (m *HostIP) XXX_DiscardUnknown() { + xxx_messageInfo_HostIP.DiscardUnknown(m) +} + +var xxx_messageInfo_HostIP proto.InternalMessageInfo + +func (m *HostIP) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *HostIP) GetIP() string { + if m != nil { + return m.IP + } + return "" +} + type FileOp struct { Actions []*FileAction 
`protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` } @@ -1751,7 +1645,7 @@ func (m *FileOp) Reset() { *m = FileOp{} } func (m *FileOp) String() string { return proto.CompactTextString(m) } func (*FileOp) ProtoMessage() {} func (*FileOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{26} + return fileDescriptor_8de16154b2733812, []int{24} } func (m *FileOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1799,7 +1693,7 @@ func (m *FileAction) Reset() { *m = FileAction{} } func (m *FileAction) String() string { return proto.CompactTextString(m) } func (*FileAction) ProtoMessage() {} func (*FileAction) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{27} + return fileDescriptor_8de16154b2733812, []int{25} } func (m *FileAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1926,7 +1820,7 @@ func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } func (*FileActionCopy) ProtoMessage() {} func (*FileActionCopy) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{28} + return fileDescriptor_8de16154b2733812, []int{26} } func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2059,7 +1953,7 @@ func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) } func (*FileActionMkFile) ProtoMessage() {} func (*FileActionMkFile) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{29} + return fileDescriptor_8de16154b2733812, []int{27} } func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2136,7 +2030,7 @@ func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } func (*FileActionMkDir) ProtoMessage() {} func 
(*FileActionMkDir) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{30} + return fileDescriptor_8de16154b2733812, []int{28} } func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2209,7 +2103,7 @@ func (m *FileActionRm) Reset() { *m = FileActionRm{} } func (m *FileActionRm) String() string { return proto.CompactTextString(m) } func (*FileActionRm) ProtoMessage() {} func (*FileActionRm) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{31} + return fileDescriptor_8de16154b2733812, []int{29} } func (m *FileActionRm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2158,7 @@ func (m *ChownOpt) Reset() { *m = ChownOpt{} } func (m *ChownOpt) String() string { return proto.CompactTextString(m) } func (*ChownOpt) ProtoMessage() {} func (*ChownOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{32} + return fileDescriptor_8de16154b2733812, []int{30} } func (m *ChownOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2314,7 +2208,7 @@ func (m *UserOpt) Reset() { *m = UserOpt{} } func (m *UserOpt) String() string { return proto.CompactTextString(m) } func (*UserOpt) ProtoMessage() {} func (*UserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{33} + return fileDescriptor_8de16154b2733812, []int{31} } func (m *UserOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2393,7 +2287,7 @@ func (m *NamedUserOpt) Reset() { *m = NamedUserOpt{} } func (m *NamedUserOpt) String() string { return proto.CompactTextString(m) } func (*NamedUserOpt) ProtoMessage() {} func (*NamedUserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{34} + return fileDescriptor_8de16154b2733812, []int{32} } func (m *NamedUserOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2435,10 +2329,7 @@ func init() { proto.RegisterType((*Input)(nil), "pb.Input") 
proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") proto.RegisterType((*Meta)(nil), "pb.Meta") - proto.RegisterType((*HostIP)(nil), "pb.HostIP") - proto.RegisterType((*Ulimit)(nil), "pb.Ulimit") proto.RegisterType((*Mount)(nil), "pb.Mount") - proto.RegisterType((*TmpfsOpt)(nil), "pb.TmpfsOpt") proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt") proto.RegisterType((*SSHOpt)(nil), "pb.SSHOpt") @@ -2463,6 +2354,7 @@ func init() { proto.RegisterType((*WorkerConstraints)(nil), "pb.WorkerConstraints") proto.RegisterType((*Definition)(nil), "pb.Definition") proto.RegisterMapType((map[github_com_opencontainers_go_digest.Digest]OpMetadata)(nil), "pb.Definition.MetadataEntry") + proto.RegisterType((*HostIP)(nil), "pb.HostIP") proto.RegisterType((*FileOp)(nil), "pb.FileOp") proto.RegisterType((*FileAction)(nil), "pb.FileAction") proto.RegisterType((*FileActionCopy)(nil), "pb.FileActionCopy") @@ -2477,154 +2369,150 @@ func init() { func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } var fileDescriptor_8de16154b2733812 = []byte{ - // 2344 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4d, 0x6f, 0x1c, 0xc7, - 0xd1, 0xe6, 0x7e, 0xef, 0xd4, 0x2e, 0xa9, 0x7d, 0xdb, 0xb2, 0xbd, 0xe6, 0xab, 0x90, 0xf4, 0xd8, - 0x31, 0x28, 0x4a, 0x5a, 0x02, 0x34, 0x60, 0x19, 0x46, 0x10, 0x80, 0xfb, 0x21, 0x70, 0x2d, 0x89, - 0x4b, 0xf4, 0x52, 0x52, 0x6e, 0xc2, 0x70, 0xb6, 0x97, 0x1c, 0x70, 0x76, 0x7a, 0x30, 0xd3, 0x2b, - 0x71, 0x73, 0xc8, 0xc1, 0xbf, 0xc0, 0x40, 0x80, 0x20, 0x97, 0x24, 0xc8, 0x7f, 0xc8, 0x35, 0x77, - 0x1f, 0x7d, 0xc8, 0xc1, 0xc8, 0xc1, 0x09, 0xa4, 0x53, 0xfe, 0x43, 0x02, 0x04, 0x55, 0xdd, 0xf3, - 0xb1, 0x14, 0x05, 0x49, 0x48, 0x90, 0xd3, 0x54, 0x3f, 0xf5, 0x74, 0x75, 0x75, 0x57, 0x75, 0x77, - 0xf5, 0x80, 0x25, 0xc3, 0xb8, 0x13, 0x46, 0x52, 0x49, 0x56, 0x0c, 0x4f, 0xd6, 0xef, 0x9c, 0x7a, - 0xea, 0x6c, 0x7e, 0xd2, 0x71, 0xe5, 0x6c, 0xf7, 
0x54, 0x9e, 0xca, 0x5d, 0x52, 0x9d, 0xcc, 0xa7, - 0xd4, 0xa2, 0x06, 0x49, 0xba, 0x8b, 0xfd, 0xc7, 0x22, 0x14, 0x47, 0x21, 0xfb, 0x18, 0xaa, 0x5e, - 0x10, 0xce, 0x55, 0xdc, 0x2e, 0x6c, 0x95, 0xb6, 0x1b, 0x7b, 0x56, 0x27, 0x3c, 0xe9, 0x0c, 0x11, - 0xe1, 0x46, 0xc1, 0xb6, 0xa0, 0x2c, 0x2e, 0x84, 0xdb, 0x2e, 0x6e, 0x15, 0xb6, 0x1b, 0x7b, 0x80, - 0x84, 0xc1, 0x85, 0x70, 0x47, 0xe1, 0xc1, 0x0a, 0x27, 0x0d, 0xfb, 0x0c, 0xaa, 0xb1, 0x9c, 0x47, - 0xae, 0x68, 0x97, 0x88, 0xd3, 0x44, 0xce, 0x98, 0x10, 0x62, 0x19, 0x2d, 0x5a, 0x9a, 0x7a, 0xbe, - 0x68, 0x97, 0x33, 0x4b, 0xf7, 0x3c, 0x5f, 0x73, 0x48, 0xc3, 0x3e, 0x81, 0xca, 0xc9, 0xdc, 0xf3, - 0x27, 0xed, 0x0a, 0x51, 0x1a, 0x48, 0xe9, 0x22, 0x40, 0x1c, 0xad, 0x63, 0xdb, 0x50, 0x0f, 0x7d, - 0x47, 0x4d, 0x65, 0x34, 0x6b, 0x43, 0x36, 0xe0, 0x91, 0xc1, 0x78, 0xaa, 0x65, 0x77, 0xa1, 0xe1, - 0xca, 0x20, 0x56, 0x91, 0xe3, 0x05, 0x2a, 0x6e, 0x37, 0x88, 0xfc, 0x3e, 0x92, 0x9f, 0xc8, 0xe8, - 0x5c, 0x44, 0xbd, 0x4c, 0xc9, 0xf3, 0xcc, 0x6e, 0x19, 0x8a, 0x32, 0xb4, 0x7f, 0x53, 0x80, 0x7a, - 0x62, 0x95, 0xd9, 0xd0, 0xdc, 0x8f, 0xdc, 0x33, 0x4f, 0x09, 0x57, 0xcd, 0x23, 0xd1, 0x2e, 0x6c, - 0x15, 0xb6, 0x2d, 0xbe, 0x84, 0xb1, 0x35, 0x28, 0x8e, 0xc6, 0xb4, 0x50, 0x16, 0x2f, 0x8e, 0xc6, - 0xac, 0x0d, 0xb5, 0xc7, 0x4e, 0xe4, 0x39, 0x81, 0xa2, 0x95, 0xb1, 0x78, 0xd2, 0x64, 0x37, 0xc0, - 0x1a, 0x8d, 0x1f, 0x8b, 0x28, 0xf6, 0x64, 0x40, 0xeb, 0x61, 0xf1, 0x0c, 0x60, 0x1b, 0x00, 0xa3, - 0xf1, 0x3d, 0xe1, 0xa0, 0xd1, 0xb8, 0x5d, 0xd9, 0x2a, 0x6d, 0x5b, 0x3c, 0x87, 0xd8, 0xbf, 0x82, - 0x0a, 0xc5, 0x88, 0x7d, 0x0d, 0xd5, 0x89, 0x77, 0x2a, 0x62, 0xa5, 0xdd, 0xe9, 0xee, 0x7d, 0xf7, - 0xe3, 0xe6, 0xca, 0x5f, 0x7f, 0xdc, 0xdc, 0xc9, 0x25, 0x83, 0x0c, 0x45, 0xe0, 0xca, 0x40, 0x39, - 0x5e, 0x20, 0xa2, 0x78, 0xf7, 0x54, 0xde, 0xd1, 0x5d, 0x3a, 0x7d, 0xfa, 0x70, 0x63, 0x81, 0xdd, - 0x84, 0x8a, 0x17, 0x4c, 0xc4, 0x05, 0xf9, 0x5f, 0xea, 0xbe, 0x67, 0x4c, 0x35, 0x46, 0x73, 0x15, - 0xce, 0xd5, 0x10, 0x55, 0x5c, 0x33, 0xec, 0xdf, 0x17, 0xa0, 0xaa, 0x73, 0x80, 0xdd, 
0x80, 0xf2, - 0x4c, 0x28, 0x87, 0xc6, 0x6f, 0xec, 0xd5, 0x71, 0x6d, 0x1f, 0x0a, 0xe5, 0x70, 0x42, 0x31, 0xbd, - 0x66, 0x72, 0x8e, 0x6b, 0x5f, 0xcc, 0xd2, 0xeb, 0x21, 0x22, 0xdc, 0x28, 0xd8, 0x4f, 0xa1, 0x16, - 0x08, 0xf5, 0x5c, 0x46, 0xe7, 0xb4, 0x46, 0x6b, 0x3a, 0xe8, 0x87, 0x42, 0x3d, 0x94, 0x13, 0xc1, - 0x13, 0x1d, 0xbb, 0x0d, 0xf5, 0x58, 0xb8, 0xf3, 0xc8, 0x53, 0x0b, 0x5a, 0xaf, 0xb5, 0xbd, 0x16, - 0x65, 0x99, 0xc1, 0x88, 0x9c, 0x32, 0xec, 0x7f, 0x14, 0xa0, 0x8c, 0x6e, 0x30, 0x06, 0x65, 0x27, - 0x3a, 0xd5, 0xd9, 0x6d, 0x71, 0x92, 0x59, 0x0b, 0x4a, 0x22, 0x78, 0x46, 0x1e, 0x59, 0x1c, 0x45, - 0x44, 0xdc, 0xe7, 0x13, 0x13, 0x23, 0x14, 0xb1, 0xdf, 0x3c, 0x16, 0x91, 0x09, 0x0d, 0xc9, 0xec, - 0x26, 0x58, 0x61, 0x24, 0x2f, 0x16, 0x4f, 0xb1, 0x77, 0x25, 0x97, 0x78, 0x08, 0x0e, 0x82, 0x67, - 0xbc, 0x1e, 0x1a, 0x89, 0xed, 0x00, 0x88, 0x0b, 0x15, 0x39, 0x07, 0x32, 0x56, 0x71, 0xbb, 0x4a, - 0x73, 0xa7, 0x7c, 0x47, 0x60, 0x78, 0xc4, 0x73, 0x5a, 0xb6, 0x0e, 0xf5, 0x33, 0x19, 0xab, 0xc0, - 0x99, 0x89, 0x76, 0x8d, 0x86, 0x4b, 0xdb, 0xcc, 0x86, 0xea, 0xdc, 0xf7, 0x66, 0x9e, 0x6a, 0x5b, - 0x99, 0x8d, 0x47, 0x84, 0x70, 0xa3, 0xb1, 0x6f, 0x43, 0x55, 0x5b, 0x45, 0xa7, 0x51, 0x32, 0xa9, - 0x49, 0x32, 0xa6, 0xe4, 0xf0, 0x28, 0x49, 0xc9, 0xe1, 0x91, 0xdd, 0x87, 0xaa, 0xee, 0x8f, 0xec, - 0x43, 0x1c, 0xd3, 0xb0, 0x51, 0x46, 0x6c, 0x2c, 0xa7, 0x4a, 0xa7, 0x00, 0x27, 0x99, 0xac, 0x3a, - 0x91, 0x5e, 0x9d, 0x12, 0x27, 0xd9, 0xfe, 0x6d, 0x09, 0x2a, 0x14, 0x46, 0xb6, 0x8d, 0x59, 0x13, - 0xce, 0xf5, 0xa0, 0xa5, 0x2e, 0x33, 0x59, 0x03, 0x94, 0x9f, 0x69, 0xd2, 0x60, 0xae, 0xae, 0x63, - 0x04, 0x7d, 0xe1, 0x2a, 0x19, 0x19, 0x7f, 0xd2, 0x36, 0x8e, 0x31, 0xc1, 0x2c, 0xd6, 0x11, 0x20, - 0x99, 0xdd, 0x82, 0xaa, 0xa4, 0xd4, 0xa3, 0x20, 0xbc, 0x26, 0x21, 0x0d, 0x05, 0x8d, 0x47, 0xc2, - 0x99, 0xc8, 0xc0, 0x5f, 0x50, 0x68, 0xea, 0x3c, 0x6d, 0xb3, 0x5b, 0x60, 0x51, 0xae, 0x1d, 0x2f, - 0x42, 0xd1, 0xae, 0x52, 0xee, 0xac, 0xa6, 0x79, 0x88, 0x20, 0xcf, 0xf4, 0x78, 0xb8, 0x1c, 0xcf, - 0xc2, 0x69, 0x3c, 0x0a, 
0x55, 0xfb, 0xbd, 0x2c, 0xc6, 0x09, 0xc6, 0x53, 0x2d, 0x32, 0x5d, 0xc7, - 0x3d, 0x13, 0xc8, 0xbc, 0x9e, 0x31, 0x7b, 0x06, 0xe3, 0xa9, 0x16, 0x1d, 0x88, 0x85, 0x1b, 0x09, - 0x85, 0xd4, 0xf7, 0x89, 0xba, 0x6a, 0x92, 0x57, 0x83, 0x3c, 0xd3, 0x63, 0xc8, 0xc7, 0xe3, 0x03, - 0x64, 0x7e, 0x90, 0x1d, 0x93, 0x1a, 0xe1, 0x46, 0xa3, 0x67, 0x1b, 0xcf, 0x7d, 0x35, 0xec, 0xb7, - 0x3f, 0xd4, 0x4b, 0x99, 0xb4, 0xed, 0x8d, 0x6c, 0x02, 0xb8, 0xac, 0xb1, 0xf7, 0x4b, 0x1d, 0xe2, - 0x12, 0x27, 0xd9, 0x1e, 0x42, 0x3d, 0x71, 0x91, 0x92, 0xa3, 0x6f, 0x12, 0xa0, 0x38, 0xec, 0xb3, - 0x3b, 0x50, 0x8b, 0xcf, 0x9c, 0xc8, 0x0b, 0x4e, 0x29, 0x42, 0x6b, 0x7b, 0xef, 0xa5, 0x33, 0x1a, - 0x6b, 0x1c, 0xbd, 0x48, 0x38, 0xb6, 0x04, 0x2b, 0x9d, 0xc2, 0x2b, 0xb6, 0x5a, 0x50, 0x9a, 0x7b, - 0x13, 0xb2, 0xb3, 0xca, 0x51, 0x44, 0xe4, 0xd4, 0xd3, 0x79, 0xb4, 0xca, 0x51, 0x44, 0xff, 0x66, - 0x72, 0xa2, 0x2f, 0x84, 0x55, 0x4e, 0x32, 0xce, 0x4d, 0x86, 0xca, 0x93, 0x81, 0xe3, 0x27, 0x91, - 0x4c, 0xda, 0xb6, 0x9f, 0xac, 0xcd, 0xff, 0x64, 0xb4, 0x5f, 0x17, 0xa0, 0x9e, 0xdc, 0x62, 0x78, - 0x24, 0x7b, 0x13, 0x11, 0x28, 0x6f, 0xea, 0x89, 0xc8, 0x0c, 0x9c, 0x43, 0xd8, 0x1d, 0xa8, 0x38, - 0x4a, 0x45, 0xc9, 0x41, 0xf7, 0x61, 0xfe, 0x0a, 0xec, 0xec, 0xa3, 0x66, 0x10, 0xa8, 0x68, 0xc1, - 0x35, 0x6b, 0xfd, 0x4b, 0x80, 0x0c, 0x44, 0x5f, 0xcf, 0xc5, 0xc2, 0x58, 0x45, 0x91, 0x5d, 0x87, - 0xca, 0x33, 0xc7, 0x9f, 0x0b, 0xb3, 0x53, 0x74, 0xe3, 0xab, 0xe2, 0x97, 0x05, 0xfb, 0xcf, 0x45, - 0xa8, 0x99, 0x2b, 0x91, 0xdd, 0x86, 0x1a, 0x5d, 0x89, 0xc6, 0xa3, 0xab, 0xb7, 0x5f, 0x42, 0x61, - 0xbb, 0xe9, 0x5d, 0x9f, 0xf3, 0xd1, 0x98, 0xd2, 0x77, 0xbe, 0xf1, 0x31, 0xbb, 0xf9, 0x4b, 0x13, - 0x31, 0x35, 0x97, 0xfa, 0x1a, 0xb2, 0xfb, 0x62, 0xea, 0x05, 0x1e, 0xae, 0x0f, 0x47, 0x15, 0xbb, - 0x9d, 0xcc, 0xba, 0x4c, 0x16, 0x3f, 0xc8, 0x5b, 0x7c, 0x75, 0xd2, 0x43, 0x68, 0xe4, 0x86, 0xb9, - 0x62, 0xd6, 0x9f, 0xe6, 0x67, 0x6d, 0x86, 0x24, 0x73, 0xba, 0x22, 0xc9, 0x56, 0xe1, 0x3f, 0x58, - 0xbf, 0x2f, 0x00, 0x32, 0x93, 0x6f, 0x7f, 0x7c, 0xd9, 0xdf, 
0x94, 0x00, 0x46, 0x21, 0x5e, 0x2a, - 0x13, 0x87, 0x6e, 0xb6, 0xa6, 0x77, 0x1a, 0xc8, 0x48, 0x3c, 0xa5, 0x6d, 0x4e, 0xfd, 0xeb, 0xbc, - 0xa1, 0x31, 0xda, 0x31, 0x6c, 0x1f, 0x1a, 0x13, 0x11, 0xbb, 0x91, 0x47, 0x09, 0x65, 0x16, 0x7d, - 0x13, 0xe7, 0x94, 0xd9, 0xe9, 0xf4, 0x33, 0x86, 0x5e, 0xab, 0x7c, 0x1f, 0xb6, 0x07, 0x4d, 0x71, - 0x11, 0xca, 0x48, 0x99, 0x51, 0x74, 0xe5, 0x74, 0x4d, 0xd7, 0x60, 0x88, 0xd3, 0x48, 0xbc, 0x21, - 0xb2, 0x06, 0x73, 0xa0, 0xec, 0x3a, 0xa1, 0x2e, 0x1b, 0x1a, 0x7b, 0xed, 0x4b, 0xe3, 0xf5, 0x9c, - 0x50, 0x2f, 0x5a, 0xf7, 0x73, 0x9c, 0xeb, 0x37, 0x7f, 0xdb, 0xbc, 0x95, 0xab, 0x15, 0x66, 0xf2, - 0x64, 0xb1, 0x4b, 0xf9, 0x72, 0xee, 0xa9, 0xdd, 0xb9, 0xf2, 0xfc, 0x5d, 0x27, 0xf4, 0xd0, 0x1c, - 0x76, 0x1c, 0xf6, 0x39, 0x99, 0x5e, 0xff, 0x39, 0xb4, 0x2e, 0xfb, 0xfd, 0x2e, 0x31, 0x58, 0xbf, - 0x0b, 0x56, 0xea, 0xc7, 0x9b, 0x3a, 0xd6, 0xf3, 0xc1, 0xfb, 0x53, 0x01, 0xaa, 0x7a, 0x57, 0xb1, - 0xbb, 0x60, 0xf9, 0xd2, 0x75, 0xd0, 0x81, 0xa4, 0x78, 0xfd, 0x28, 0xdb, 0x74, 0x9d, 0x07, 0x89, - 0x4e, 0xaf, 0x6a, 0xc6, 0xc5, 0x24, 0xf3, 0x82, 0xa9, 0x4c, 0x76, 0xc1, 0x5a, 0xd6, 0x69, 0x18, - 0x4c, 0x25, 0xd7, 0xca, 0xf5, 0xfb, 0xb0, 0xb6, 0x6c, 0xe2, 0x0a, 0x3f, 0x3f, 0x59, 0x4e, 0x57, - 0x3a, 0xd3, 0xd3, 0x4e, 0x79, 0xb7, 0xef, 0x82, 0x95, 0xe2, 0x6c, 0xe7, 0x55, 0xc7, 0x9b, 0xf9, - 0x9e, 0x39, 0x5f, 0x6d, 0x1f, 0x20, 0x73, 0x0d, 0x0f, 0x2b, 0xac, 0x92, 0x83, 0xec, 0xd6, 0x4e, - 0xdb, 0x74, 0x83, 0x3a, 0xca, 0x21, 0x57, 0x9a, 0x9c, 0x64, 0xd6, 0x01, 0x98, 0xa4, 0x1b, 0xf6, - 0x35, 0xdb, 0x38, 0xc7, 0xb0, 0x47, 0x50, 0x4f, 0x9c, 0x60, 0x5b, 0xd0, 0x88, 0xcd, 0xc8, 0x58, - 0x13, 0xe2, 0x70, 0x15, 0x9e, 0x87, 0xb0, 0xb6, 0x8b, 0x9c, 0xe0, 0x54, 0x2c, 0xd5, 0x76, 0x1c, - 0x11, 0x6e, 0x14, 0xf6, 0x13, 0xa8, 0x10, 0x80, 0xdb, 0x2c, 0x56, 0x4e, 0xa4, 0x4c, 0x99, 0xa8, - 0xcb, 0x26, 0x19, 0xd3, 0xb0, 0xdd, 0x32, 0x26, 0x22, 0xd7, 0x04, 0xf6, 0x29, 0x16, 0x67, 0x13, - 0xb3, 0xa2, 0x57, 0xf1, 0x50, 0x6d, 0xff, 0x0c, 0xea, 0x09, 0x8c, 0x33, 0x7f, 0xe0, 0x05, 0xc2, - 
0xb8, 0x48, 0x32, 0x96, 0xd7, 0xbd, 0x33, 0x27, 0x72, 0x5c, 0x25, 0x74, 0xb1, 0x51, 0xe1, 0x19, - 0x60, 0x7f, 0x02, 0x8d, 0xdc, 0xee, 0xc1, 0x74, 0x7b, 0x4c, 0x61, 0xd4, 0x7b, 0x58, 0x37, 0xec, - 0x3f, 0x60, 0xf1, 0x9f, 0xd4, 0x73, 0x3f, 0x01, 0x38, 0x53, 0x2a, 0x7c, 0x4a, 0x05, 0x9e, 0x59, - 0x7b, 0x0b, 0x11, 0x62, 0xb0, 0x4d, 0x68, 0x60, 0x23, 0x36, 0x7a, 0x9d, 0xef, 0xd4, 0x23, 0xd6, - 0x84, 0xff, 0x07, 0x6b, 0x9a, 0x76, 0x2f, 0x99, 0xd0, 0x25, 0xbd, 0x3f, 0x82, 0x7a, 0x20, 0x8d, - 0x4e, 0xd7, 0x9b, 0xb5, 0x40, 0xa6, 0xfd, 0x1c, 0xdf, 0x37, 0xba, 0x8a, 0xee, 0xe7, 0xf8, 0x3e, - 0x29, 0xed, 0x5b, 0xf0, 0x7f, 0xaf, 0x3c, 0x63, 0xd8, 0x07, 0x50, 0x9d, 0x7a, 0xbe, 0xa2, 0x1b, - 0x01, 0xeb, 0x5b, 0xd3, 0xb2, 0xff, 0x55, 0x00, 0xc8, 0xc2, 0x8e, 0xc9, 0x8c, 0x47, 0x3b, 0x72, - 0x9a, 0xfa, 0x28, 0xf7, 0xa1, 0x3e, 0x33, 0x87, 0x84, 0x09, 0xe8, 0x8d, 0xe5, 0x54, 0xe9, 0x24, - 0x67, 0x88, 0x3e, 0x3e, 0xf6, 0xcc, 0xf1, 0xf1, 0x2e, 0x4f, 0x8d, 0x74, 0x04, 0xaa, 0x72, 0xf2, - 0x4f, 0x46, 0xc8, 0x76, 0x21, 0x37, 0x9a, 0xf5, 0xfb, 0xb0, 0xba, 0x34, 0xe4, 0x5b, 0x5e, 0x18, - 0xd9, 0x61, 0x97, 0xdf, 0x82, 0x7b, 0x50, 0xd5, 0x6f, 0x4d, 0xb6, 0x0d, 0x35, 0xc7, 0xd5, 0xbb, - 0x2f, 0x77, 0x02, 0xa0, 0x72, 0x9f, 0x60, 0x9e, 0xa8, 0xed, 0xbf, 0x14, 0x01, 0x32, 0xfc, 0x1d, - 0x4a, 0xdd, 0xaf, 0x60, 0x2d, 0x16, 0xae, 0x0c, 0x26, 0x4e, 0xb4, 0x20, 0xad, 0x79, 0x53, 0x5d, - 0xd5, 0xe5, 0x12, 0x33, 0x57, 0xf6, 0x96, 0xde, 0x5c, 0xf6, 0x6e, 0x43, 0xd9, 0x95, 0xe1, 0xc2, - 0xdc, 0x0b, 0x6c, 0x79, 0x22, 0x3d, 0x19, 0x2e, 0xf0, 0x65, 0x8d, 0x0c, 0xd6, 0x81, 0xea, 0xec, - 0x9c, 0x5e, 0xdf, 0xfa, 0xe5, 0x72, 0x7d, 0x99, 0xfb, 0xf0, 0x1c, 0x65, 0x7c, 0xab, 0x6b, 0x16, - 0xbb, 0x05, 0x95, 0xd9, 0xf9, 0xc4, 0x8b, 0xa8, 0x60, 0x6e, 0xe8, 0x42, 0x30, 0x4f, 0xef, 0x7b, - 0x11, 0xbe, 0xc8, 0x89, 0xc3, 0x6c, 0x28, 0x46, 0x33, 0x7a, 0xbc, 0x34, 0xf4, 0xb3, 0x2c, 0xb7, - 0x9a, 0xb3, 0x83, 0x15, 0x5e, 0x8c, 0x66, 0xdd, 0x3a, 0x54, 0xf5, 0xba, 0xda, 0xff, 0x2c, 0xc1, - 0xda, 0xb2, 0x97, 0x18, 0xd9, 0x38, 
0x72, 0x93, 0xc8, 0xc6, 0x91, 0x9b, 0xbe, 0x08, 0x8a, 0xb9, - 0x17, 0x81, 0x0d, 0x15, 0xf9, 0x3c, 0x10, 0x51, 0xfe, 0x37, 0x43, 0xef, 0x4c, 0x3e, 0x0f, 0xb0, - 0x2a, 0xd5, 0xaa, 0xa5, 0x22, 0xaf, 0x62, 0x8a, 0xbc, 0x4f, 0x61, 0x75, 0x2a, 0x7d, 0x5f, 0x3e, - 0x1f, 0x2f, 0x66, 0xbe, 0x17, 0x9c, 0x9b, 0x4a, 0x6f, 0x19, 0x64, 0xdb, 0x70, 0x6d, 0xe2, 0x45, - 0xe8, 0x4e, 0x4f, 0x06, 0x4a, 0x04, 0xf4, 0x70, 0x43, 0xde, 0x65, 0x98, 0x7d, 0x0d, 0x5b, 0x8e, - 0x52, 0x62, 0x16, 0xaa, 0x47, 0x41, 0xe8, 0xb8, 0xe7, 0x7d, 0xe9, 0xd2, 0x2e, 0x9c, 0x85, 0x8e, - 0xf2, 0x4e, 0x3c, 0x1f, 0xdf, 0xa8, 0x35, 0xea, 0xfa, 0x46, 0x1e, 0xfb, 0x0c, 0xd6, 0xdc, 0x48, - 0x38, 0x4a, 0xf4, 0x45, 0xac, 0x8e, 0x1c, 0x75, 0xd6, 0xae, 0x53, 0xcf, 0x4b, 0x28, 0xce, 0xc1, - 0x41, 0x6f, 0x9f, 0x78, 0xfe, 0xc4, 0xc5, 0xe7, 0x98, 0xa5, 0xe7, 0xb0, 0x04, 0xb2, 0x0e, 0x30, - 0x02, 0x06, 0xb3, 0x50, 0x2d, 0x52, 0x2a, 0x10, 0xf5, 0x0a, 0x0d, 0x9e, 0x93, 0xca, 0x9b, 0x89, - 0x58, 0x39, 0xb3, 0x90, 0x7e, 0x8f, 0x94, 0x78, 0x06, 0xb0, 0x9b, 0xd0, 0xf2, 0x02, 0xd7, 0x9f, - 0x4f, 0xc4, 0xd3, 0x10, 0x27, 0x12, 0x05, 0x71, 0xbb, 0x49, 0xa7, 0xca, 0x35, 0x83, 0x1f, 0x19, - 0x18, 0xa9, 0xe2, 0xe2, 0x12, 0x75, 0x55, 0x53, 0x0d, 0x9e, 0x50, 0xed, 0x6f, 0x0b, 0xd0, 0xba, - 0x9c, 0x78, 0x18, 0xb6, 0x10, 0x27, 0x6f, 0x1e, 0xa3, 0x28, 0xa7, 0xa1, 0x2c, 0xe6, 0x42, 0x99, - 0x5c, 0x73, 0xa5, 0xdc, 0x35, 0x97, 0xa6, 0x45, 0xf9, 0xf5, 0x69, 0xb1, 0x34, 0xd1, 0xca, 0xa5, - 0x89, 0xda, 0xbf, 0x2b, 0xc0, 0xb5, 0x4b, 0xc9, 0xfd, 0xd6, 0x1e, 0x6d, 0x41, 0x63, 0xe6, 0x9c, - 0x8b, 0x23, 0x27, 0xa2, 0x94, 0x29, 0xe9, 0x3a, 0x30, 0x07, 0xfd, 0x17, 0xfc, 0x0b, 0xa0, 0x99, - 0xdf, 0x51, 0x57, 0xfa, 0x96, 0x24, 0xc8, 0xa1, 0x54, 0xf7, 0xe4, 0xdc, 0x5c, 0xa1, 0x49, 0x82, - 0x24, 0xe0, 0xab, 0x69, 0x54, 0xba, 0x22, 0x8d, 0xec, 0x43, 0xa8, 0x27, 0x0e, 0xb2, 0x4d, 0xf3, - 0x27, 0xa4, 0x90, 0xfd, 0x91, 0x7b, 0x14, 0x8b, 0x08, 0x7d, 0xd7, 0xbf, 0x45, 0x3e, 0x86, 0xca, - 0x69, 0x24, 0xe7, 0xa1, 0x39, 0x83, 0x97, 0x18, 0x5a, 0x63, 0x8f, 0xa1, 
0x66, 0x10, 0xb6, 0x03, - 0xd5, 0x93, 0x45, 0xfa, 0xdf, 0xc1, 0x1c, 0x17, 0xd8, 0x9e, 0x18, 0x06, 0x9e, 0x41, 0x9a, 0xc1, - 0xae, 0x43, 0xf9, 0x64, 0x31, 0xec, 0xeb, 0x57, 0x1d, 0x9e, 0x64, 0xd8, 0xea, 0x56, 0xb5, 0x43, - 0xf6, 0x03, 0x68, 0xe6, 0xfb, 0xe1, 0xa2, 0xe4, 0x2a, 0x23, 0x92, 0xb3, 0x23, 0xbb, 0xf8, 0x86, - 0x23, 0x7b, 0x67, 0x1b, 0x6a, 0xe6, 0x9f, 0x13, 0xb3, 0xa0, 0xf2, 0xe8, 0x70, 0x3c, 0x38, 0x6e, - 0xad, 0xb0, 0x3a, 0x94, 0x0f, 0x46, 0xe3, 0xe3, 0x56, 0x01, 0xa5, 0xc3, 0xd1, 0xe1, 0xa0, 0x55, - 0xdc, 0xb9, 0x09, 0xcd, 0xfc, 0x5f, 0x27, 0xd6, 0x80, 0xda, 0x78, 0xff, 0xb0, 0xdf, 0x1d, 0xfd, - 0xa2, 0xb5, 0xc2, 0x9a, 0x50, 0x1f, 0x1e, 0x8e, 0x07, 0xbd, 0x47, 0x7c, 0xd0, 0x2a, 0xec, 0x8c, - 0xc0, 0x4a, 0x7f, 0x32, 0xa0, 0x85, 0xee, 0xf0, 0xb0, 0xdf, 0x5a, 0x61, 0x00, 0xd5, 0xf1, 0xa0, - 0xc7, 0x07, 0x68, 0xb7, 0x06, 0xa5, 0xf1, 0xf8, 0xa0, 0x55, 0xc4, 0x51, 0x7b, 0xfb, 0xbd, 0x83, - 0x41, 0xab, 0x84, 0xe2, 0xf1, 0xc3, 0xa3, 0x7b, 0xe3, 0x56, 0x99, 0xad, 0x82, 0x85, 0x0e, 0x3c, - 0xa5, 0x9e, 0x93, 0x9d, 0x2f, 0xe0, 0xda, 0xa5, 0xd7, 0x38, 0x19, 0x3b, 0xd8, 0xe7, 0x03, 0x34, - 0xdc, 0x80, 0xda, 0x11, 0x1f, 0x3e, 0xde, 0x3f, 0x1e, 0xb4, 0x0a, 0xa8, 0x78, 0x30, 0xea, 0xdd, - 0x1f, 0xf4, 0x5b, 0xc5, 0xee, 0x8d, 0xef, 0x5e, 0x6c, 0x14, 0xbe, 0x7f, 0xb1, 0x51, 0xf8, 0xe1, - 0xc5, 0x46, 0xe1, 0xef, 0x2f, 0x36, 0x0a, 0xdf, 0xbe, 0xdc, 0x58, 0xf9, 0xfe, 0xe5, 0xc6, 0xca, - 0x0f, 0x2f, 0x37, 0x56, 0x4e, 0xaa, 0xf4, 0x4b, 0xf8, 0xf3, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, - 0xd3, 0x04, 0x14, 0x75, 0x52, 0x16, 0x00, 0x00, + // 2288 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1c, 0xc7, + 0xf1, 0xe7, 0xbe, 0x77, 0x6b, 0x97, 0xd4, 0xfe, 0xdb, 0xb2, 0xbd, 0xe6, 0x5f, 0x21, 0xe9, 0x91, + 0x62, 0x50, 0x94, 0xb4, 0x04, 0x68, 0xc0, 0x32, 0x8c, 0x20, 0x00, 0xf7, 0x21, 0x70, 0x2d, 0x89, + 0x4b, 0xf4, 0xea, 0x91, 0x9b, 0x30, 0x9c, 0x69, 0x2e, 0x07, 0x9c, 0x9d, 0x1e, 0xcc, 0xf4, 0x4a, + 0xdc, 0x1c, 
0x72, 0xf0, 0x27, 0x30, 0x10, 0x20, 0xb7, 0x24, 0xc8, 0x77, 0xc8, 0x35, 0x77, 0x1f, + 0x7d, 0xc8, 0xc1, 0xc8, 0xc1, 0x0e, 0xa4, 0x7b, 0x3e, 0x41, 0x02, 0x04, 0x55, 0xdd, 0xf3, 0x58, + 0x8a, 0x82, 0x24, 0x24, 0xc8, 0x69, 0xba, 0x7f, 0xf5, 0xeb, 0xea, 0xea, 0xea, 0xea, 0xea, 0xea, + 0x81, 0x86, 0x0c, 0xe3, 0x6e, 0x18, 0x49, 0x25, 0x59, 0x31, 0x3c, 0x5e, 0xbf, 0x33, 0xf5, 0xd4, + 0xe9, 0xfc, 0xb8, 0xeb, 0xc8, 0xd9, 0xee, 0x54, 0x4e, 0xe5, 0x2e, 0x89, 0x8e, 0xe7, 0x27, 0xd4, + 0xa3, 0x0e, 0xb5, 0xf4, 0x10, 0xeb, 0x4f, 0x45, 0x28, 0x8e, 0x43, 0xf6, 0x29, 0x54, 0xbd, 0x20, + 0x9c, 0xab, 0xb8, 0x53, 0xd8, 0x2a, 0x6d, 0x37, 0xf7, 0x1a, 0xdd, 0xf0, 0xb8, 0x3b, 0x42, 0x84, + 0x1b, 0x01, 0xdb, 0x82, 0xb2, 0x38, 0x17, 0x4e, 0xa7, 0xb8, 0x55, 0xd8, 0x6e, 0xee, 0x01, 0x12, + 0x86, 0xe7, 0xc2, 0x19, 0x87, 0x07, 0x2b, 0x9c, 0x24, 0xec, 0x33, 0xa8, 0xc6, 0x72, 0x1e, 0x39, + 0xa2, 0x53, 0x22, 0x4e, 0x0b, 0x39, 0x13, 0x42, 0x88, 0x65, 0xa4, 0xa8, 0xe9, 0xc4, 0xf3, 0x45, + 0xa7, 0x9c, 0x69, 0xba, 0xe7, 0xf9, 0x9a, 0x43, 0x12, 0x76, 0x1d, 0x2a, 0xc7, 0x73, 0xcf, 0x77, + 0x3b, 0x15, 0xa2, 0x34, 0x91, 0xd2, 0x43, 0x80, 0x38, 0x5a, 0xc6, 0xb6, 0xa1, 0x1e, 0xfa, 0xb6, + 0x3a, 0x91, 0xd1, 0xac, 0x03, 0xd9, 0x84, 0x47, 0x06, 0xe3, 0xa9, 0x94, 0xdd, 0x85, 0xa6, 0x23, + 0x83, 0x58, 0x45, 0xb6, 0x17, 0xa8, 0xb8, 0xd3, 0x24, 0xf2, 0x87, 0x48, 0x7e, 0x2a, 0xa3, 0x33, + 0x11, 0xf5, 0x33, 0x21, 0xcf, 0x33, 0x7b, 0x65, 0x28, 0xca, 0xd0, 0xfa, 0x5d, 0x01, 0xea, 0x89, + 0x56, 0x66, 0x41, 0x6b, 0x3f, 0x72, 0x4e, 0x3d, 0x25, 0x1c, 0x35, 0x8f, 0x44, 0xa7, 0xb0, 0x55, + 0xd8, 0x6e, 0xf0, 0x25, 0x8c, 0xad, 0x41, 0x71, 0x3c, 0x21, 0x47, 0x35, 0x78, 0x71, 0x3c, 0x61, + 0x1d, 0xa8, 0x3d, 0xb1, 0x23, 0xcf, 0x0e, 0x14, 0x79, 0xa6, 0xc1, 0x93, 0x2e, 0xbb, 0x06, 0x8d, + 0xf1, 0xe4, 0x89, 0x88, 0x62, 0x4f, 0x06, 0xe4, 0x8f, 0x06, 0xcf, 0x00, 0xb6, 0x01, 0x30, 0x9e, + 0xdc, 0x13, 0x36, 0x2a, 0x8d, 0x3b, 0x95, 0xad, 0xd2, 0x76, 0x83, 0xe7, 0x10, 0xeb, 0x37, 0x50, + 0xa1, 0x3d, 0x62, 0x5f, 0x43, 0xd5, 0xf5, 0xa6, 
0x22, 0x56, 0xda, 0x9c, 0xde, 0xde, 0x77, 0x3f, + 0x6e, 0xae, 0xfc, 0xed, 0xc7, 0xcd, 0x9d, 0x5c, 0x30, 0xc8, 0x50, 0x04, 0x8e, 0x0c, 0x94, 0xed, + 0x05, 0x22, 0x8a, 0x77, 0xa7, 0xf2, 0x8e, 0x1e, 0xd2, 0x1d, 0xd0, 0x87, 0x1b, 0x0d, 0xec, 0x26, + 0x54, 0xbc, 0xc0, 0x15, 0xe7, 0x64, 0x7f, 0xa9, 0xf7, 0x81, 0x51, 0xd5, 0x1c, 0xcf, 0x55, 0x38, + 0x57, 0x23, 0x14, 0x71, 0xcd, 0xb0, 0xfe, 0x50, 0x80, 0xaa, 0x8e, 0x01, 0x76, 0x0d, 0xca, 0x33, + 0xa1, 0x6c, 0x9a, 0xbf, 0xb9, 0x57, 0x47, 0xdf, 0x3e, 0x14, 0xca, 0xe6, 0x84, 0x62, 0x78, 0xcd, + 0xe4, 0x1c, 0x7d, 0x5f, 0xcc, 0xc2, 0xeb, 0x21, 0x22, 0xdc, 0x08, 0xd8, 0xcf, 0xa1, 0x16, 0x08, + 0xf5, 0x42, 0x46, 0x67, 0xe4, 0xa3, 0x35, 0xbd, 0xe9, 0x87, 0x42, 0x3d, 0x94, 0xae, 0xe0, 0x89, + 0x8c, 0xdd, 0x86, 0x7a, 0x2c, 0x9c, 0x79, 0xe4, 0xa9, 0x05, 0xf9, 0x6b, 0x6d, 0xaf, 0x4d, 0x51, + 0x66, 0x30, 0x22, 0xa7, 0x0c, 0xeb, 0xa7, 0x02, 0x94, 0xd1, 0x0c, 0xc6, 0xa0, 0x6c, 0x47, 0x53, + 0x1d, 0xdd, 0x0d, 0x4e, 0x6d, 0xd6, 0x86, 0x92, 0x08, 0x9e, 0x93, 0x45, 0x0d, 0x8e, 0x4d, 0x44, + 0x9c, 0x17, 0xae, 0xd9, 0x23, 0x6c, 0xe2, 0xb8, 0x79, 0x2c, 0x22, 0xb3, 0x35, 0xd4, 0x66, 0x37, + 0xa1, 0x11, 0x46, 0xf2, 0x7c, 0xf1, 0x0c, 0x47, 0x57, 0x72, 0x81, 0x87, 0xe0, 0x30, 0x78, 0xce, + 0xeb, 0xa1, 0x69, 0xb1, 0x1d, 0x00, 0x71, 0xae, 0x22, 0xfb, 0x40, 0xc6, 0x2a, 0xee, 0x54, 0x69, + 0xed, 0x14, 0xef, 0x08, 0x8c, 0x8e, 0x78, 0x4e, 0xca, 0xd6, 0xa1, 0x7e, 0x2a, 0x63, 0x15, 0xd8, + 0x33, 0xd1, 0xa9, 0xd1, 0x74, 0x69, 0x1f, 0x03, 0x28, 0x3e, 0x9d, 0x4d, 0xbc, 0x5f, 0x8b, 0x4e, + 0x1d, 0x77, 0x85, 0x27, 0x5d, 0xeb, 0x1f, 0x45, 0xa8, 0x90, 0x23, 0xd9, 0x36, 0xee, 0x5b, 0x38, + 0xd7, 0x21, 0x50, 0xea, 0x31, 0xb3, 0x6f, 0x40, 0x11, 0x92, 0x6e, 0x1b, 0x46, 0xcb, 0x3a, 0xfa, + 0xd0, 0x17, 0x8e, 0x92, 0x91, 0x09, 0xd2, 0xb4, 0x8f, 0x0b, 0x76, 0x31, 0x8e, 0xb4, 0x0f, 0xa8, + 0xcd, 0x6e, 0x41, 0x55, 0xd2, 0xe6, 0x93, 0x1b, 0xde, 0x10, 0x12, 0x86, 0x82, 0xca, 0x23, 0x61, + 0xbb, 0x32, 0xf0, 0x17, 0xe4, 0x9c, 0x3a, 0x4f, 0xfb, 0xec, 0x16, 0x34, 0x68, 0xb7, 
0x1f, 0x2d, + 0x42, 0xd1, 0xa9, 0xd2, 0xee, 0xad, 0xa6, 0x91, 0x80, 0x20, 0xcf, 0xe4, 0x78, 0xbc, 0x1d, 0xdb, + 0x39, 0x15, 0xe3, 0x50, 0x75, 0xae, 0x66, 0x5e, 0xee, 0x1b, 0x8c, 0xa7, 0x52, 0x54, 0x1b, 0x0b, + 0x27, 0x12, 0x0a, 0xa9, 0x1f, 0x12, 0x75, 0xd5, 0x04, 0x85, 0x06, 0x79, 0x26, 0x67, 0x16, 0x54, + 0x27, 0x93, 0x03, 0x64, 0x7e, 0x94, 0xa5, 0x1f, 0x8d, 0x70, 0x23, 0xd1, 0x6b, 0x88, 0xe7, 0xbe, + 0x1a, 0x0d, 0x3a, 0x1f, 0x6b, 0x07, 0x25, 0x7d, 0x6b, 0x04, 0xf5, 0xc4, 0x04, 0x3c, 0xe7, 0xa3, + 0x81, 0xc9, 0x00, 0xc5, 0xd1, 0x80, 0xdd, 0xc1, 0x6d, 0xb2, 0x23, 0x2f, 0x98, 0x92, 0x5f, 0xd7, + 0xf6, 0x3e, 0x48, 0x2d, 0x9e, 0x68, 0x1c, 0x67, 0x49, 0x38, 0x96, 0x84, 0x46, 0x6a, 0xe2, 0x6b, + 0xba, 0xda, 0x50, 0x9a, 0x7b, 0x2e, 0xe9, 0x59, 0xe5, 0xd8, 0x44, 0x64, 0xea, 0xe9, 0xe8, 0x5c, + 0xe5, 0xd8, 0xc4, 0xcd, 0x9a, 0x49, 0x57, 0x27, 0xd2, 0x55, 0x4e, 0x6d, 0xb4, 0x5d, 0x86, 0xca, + 0x93, 0x81, 0xed, 0x27, 0xfe, 0x4f, 0xfa, 0x96, 0x9f, 0xac, 0xfd, 0x7f, 0x32, 0xdb, 0x6f, 0x0b, + 0x50, 0x4f, 0xb2, 0x3f, 0xa6, 0x32, 0xcf, 0x15, 0x81, 0xf2, 0x4e, 0x3c, 0x11, 0x99, 0x89, 0x73, + 0x08, 0xbb, 0x03, 0x15, 0x5b, 0xa9, 0x28, 0x49, 0x10, 0x1f, 0xe7, 0xaf, 0x8e, 0xee, 0x3e, 0x4a, + 0x86, 0x81, 0x8a, 0x16, 0x5c, 0xb3, 0xd6, 0xbf, 0x04, 0xc8, 0x40, 0xb4, 0xf5, 0x4c, 0x2c, 0x8c, + 0x56, 0x6c, 0xb2, 0xab, 0x50, 0x79, 0x6e, 0xfb, 0x73, 0x61, 0xe2, 0x5b, 0x77, 0xbe, 0x2a, 0x7e, + 0x59, 0xb0, 0xfe, 0x52, 0x84, 0x9a, 0xb9, 0x4a, 0xd8, 0x6d, 0xa8, 0xd1, 0x55, 0x62, 0x2c, 0xba, + 0xfc, 0xd0, 0x24, 0x14, 0xb6, 0x9b, 0xde, 0x91, 0x39, 0x1b, 0x8d, 0x2a, 0x7d, 0x57, 0x1a, 0x1b, + 0xb3, 0x1b, 0xb3, 0xe4, 0x8a, 0x13, 0x73, 0x19, 0xae, 0x21, 0x7b, 0x20, 0x4e, 0xbc, 0xc0, 0x43, + 0xff, 0x70, 0x14, 0xb1, 0xdb, 0xc9, 0xaa, 0xcb, 0xa4, 0xf1, 0xa3, 0xbc, 0xc6, 0xd7, 0x17, 0x3d, + 0x82, 0x66, 0x6e, 0x9a, 0x4b, 0x56, 0x7d, 0x23, 0xbf, 0x6a, 0x33, 0x25, 0xa9, 0xd3, 0x37, 0x79, + 0xe6, 0x85, 0xff, 0xc0, 0x7f, 0x5f, 0x00, 0x64, 0x2a, 0xdf, 0x3d, 0xe9, 0x58, 0xdf, 0x94, 0x00, + 0xc6, 0x21, 0x26, 0x63, 
0xd7, 0xa6, 0x1b, 0xa1, 0xe5, 0x4d, 0x03, 0x19, 0x89, 0x67, 0x74, 0x8c, + 0x69, 0x7c, 0x9d, 0x37, 0x35, 0x46, 0x27, 0x86, 0xed, 0x43, 0xd3, 0x15, 0xb1, 0x13, 0x79, 0x14, + 0x50, 0xc6, 0xe9, 0x9b, 0xb8, 0xa6, 0x4c, 0x4f, 0x77, 0x90, 0x31, 0xb4, 0xaf, 0xf2, 0x63, 0xd8, + 0x1e, 0xb4, 0xc4, 0x79, 0x28, 0x23, 0x65, 0x66, 0xd1, 0x15, 0xc7, 0x15, 0x5d, 0xbb, 0x20, 0x4e, + 0x33, 0xf1, 0xa6, 0xc8, 0x3a, 0xcc, 0x86, 0xb2, 0x63, 0x87, 0xfa, 0xba, 0x6d, 0xee, 0x75, 0x2e, + 0xcc, 0xd7, 0xb7, 0x43, 0xed, 0xb4, 0xde, 0xe7, 0xb8, 0xd6, 0x6f, 0x7e, 0xda, 0xbc, 0x95, 0xbb, + 0x63, 0x67, 0xf2, 0x78, 0xb1, 0x4b, 0xf1, 0x72, 0xe6, 0xa9, 0xdd, 0xb9, 0xf2, 0xfc, 0x5d, 0x3b, + 0xf4, 0x50, 0x1d, 0x0e, 0x1c, 0x0d, 0x38, 0xa9, 0x5e, 0xff, 0x25, 0xb4, 0x2f, 0xda, 0xfd, 0x3e, + 0x7b, 0xb0, 0x7e, 0x17, 0x1a, 0xa9, 0x1d, 0x6f, 0x1b, 0x58, 0xcf, 0x6f, 0xde, 0x9f, 0x0b, 0x50, + 0xd5, 0xa7, 0x8a, 0xdd, 0x85, 0x86, 0x2f, 0x1d, 0x1b, 0x0d, 0x48, 0x8a, 0xbe, 0x4f, 0xb2, 0x43, + 0xd7, 0x7d, 0x90, 0xc8, 0xb4, 0x57, 0x33, 0x2e, 0x06, 0x99, 0x17, 0x9c, 0xc8, 0xe4, 0x14, 0xac, + 0x65, 0x83, 0x46, 0xc1, 0x89, 0xe4, 0x5a, 0xb8, 0x7e, 0x1f, 0xd6, 0x96, 0x55, 0x5c, 0x62, 0xe7, + 0xf5, 0xe5, 0x70, 0xa5, 0x9c, 0x9d, 0x0e, 0xca, 0x9b, 0x7d, 0x17, 0x1a, 0x29, 0xce, 0x76, 0x5e, + 0x37, 0xbc, 0x95, 0x1f, 0x99, 0xb3, 0xd5, 0xf2, 0x01, 0x32, 0xd3, 0x30, 0x59, 0x61, 0x75, 0x49, + 0x37, 0xac, 0x36, 0x23, 0xed, 0xd3, 0xbd, 0x67, 0x2b, 0x9b, 0x4c, 0x69, 0x71, 0x6a, 0xb3, 0x2e, + 0x80, 0x9b, 0x1e, 0xd8, 0x37, 0x1c, 0xe3, 0x1c, 0xc3, 0x1a, 0x43, 0x3d, 0x31, 0x82, 0x6d, 0x41, + 0x33, 0x36, 0x33, 0x63, 0x2d, 0x85, 0xd3, 0x55, 0x78, 0x1e, 0xc2, 0x9a, 0x28, 0xb2, 0x83, 0xa9, + 0x58, 0xaa, 0x89, 0x38, 0x22, 0xdc, 0x08, 0xac, 0xa7, 0x50, 0x21, 0x00, 0x8f, 0x59, 0xac, 0xec, + 0x48, 0x99, 0xf2, 0x4a, 0x97, 0x1b, 0x32, 0xa6, 0x69, 0x7b, 0x65, 0x0c, 0x44, 0xae, 0x09, 0xec, + 0x06, 0x16, 0x35, 0xae, 0xf1, 0xe8, 0x65, 0x3c, 0x14, 0x5b, 0xbf, 0x80, 0x7a, 0x02, 0xe3, 0xca, + 0x1f, 0x78, 0x81, 0x30, 0x26, 0x52, 0x1b, 0xcb, 0xd2, 0xfe, 
0xa9, 0x1d, 0xd9, 0x8e, 0x12, 0xba, + 0x44, 0xa8, 0xf0, 0x0c, 0xb0, 0xae, 0x43, 0x33, 0x77, 0x7a, 0x30, 0xdc, 0x9e, 0xd0, 0x36, 0xea, + 0x33, 0xac, 0x3b, 0xd6, 0x1f, 0xb1, 0x68, 0x4e, 0xea, 0xa0, 0x9f, 0x01, 0x9c, 0x2a, 0x15, 0x3e, + 0xa3, 0xc2, 0xc8, 0xf8, 0xbe, 0x81, 0x08, 0x31, 0xd8, 0x26, 0x34, 0xb1, 0x13, 0x1b, 0xb9, 0x8e, + 0x77, 0x1a, 0x11, 0x6b, 0xc2, 0xff, 0x43, 0xe3, 0x24, 0x1d, 0x5e, 0x32, 0x5b, 0x97, 0x8c, 0xfe, + 0x04, 0xea, 0x81, 0x34, 0x32, 0x5d, 0xa7, 0xd5, 0x02, 0x99, 0x8e, 0xb3, 0x7d, 0xdf, 0xc8, 0x2a, + 0x7a, 0x9c, 0xed, 0xfb, 0x24, 0xb4, 0x6e, 0xc1, 0xff, 0xbd, 0x56, 0xfe, 0xb3, 0x8f, 0xa0, 0x7a, + 0xe2, 0xf9, 0x8a, 0x6e, 0x04, 0xac, 0x0b, 0x4d, 0xcf, 0xfa, 0x57, 0x01, 0x20, 0xdb, 0x76, 0x0c, + 0x66, 0x4c, 0xed, 0xc8, 0x69, 0xe9, 0x54, 0xee, 0x43, 0x7d, 0x66, 0x92, 0x84, 0xd9, 0xd0, 0x6b, + 0xcb, 0xa1, 0xd2, 0x4d, 0x72, 0x88, 0x4e, 0x1f, 0x7b, 0x26, 0x7d, 0xbc, 0x4f, 0x89, 0x9e, 0xce, + 0x40, 0x55, 0x4c, 0xfe, 0xa9, 0x05, 0xd9, 0x29, 0xe4, 0x46, 0xb2, 0x7e, 0x1f, 0x56, 0x97, 0xa6, + 0x7c, 0xc7, 0x0b, 0x23, 0x4b, 0x76, 0xf9, 0x23, 0x78, 0x1b, 0xaa, 0xba, 0x66, 0xc5, 0x78, 0xc1, + 0x96, 0x51, 0x43, 0x6d, 0x2a, 0x27, 0x8e, 0x92, 0x07, 0xcf, 0xe8, 0xc8, 0xda, 0x83, 0xaa, 0x7e, + 0xd1, 0xb1, 0x6d, 0xa8, 0xd9, 0x8e, 0x3e, 0xab, 0xb9, 0x7c, 0x81, 0xc2, 0x7d, 0x82, 0x79, 0x22, + 0xb6, 0xfe, 0x5a, 0x04, 0xc8, 0xf0, 0xf7, 0x28, 0x67, 0xbf, 0x82, 0xb5, 0x58, 0x38, 0x32, 0x70, + 0xed, 0x68, 0x41, 0x52, 0xf3, 0x72, 0xb9, 0x6c, 0xc8, 0x05, 0x66, 0xae, 0xb4, 0x2d, 0xbd, 0xbd, + 0xb4, 0xdd, 0x86, 0xb2, 0x23, 0xc3, 0x85, 0xb9, 0x45, 0xd8, 0xf2, 0x42, 0xfa, 0x32, 0x5c, 0xe0, + 0xfb, 0x15, 0x19, 0xac, 0x0b, 0xd5, 0xd9, 0x19, 0xbd, 0x71, 0xf5, 0xfb, 0xe0, 0xea, 0x32, 0xf7, + 0xe1, 0x19, 0xb6, 0xf1, 0x45, 0xac, 0x59, 0xec, 0x16, 0x54, 0x66, 0x67, 0xae, 0x17, 0x51, 0x51, + 0xdc, 0xd4, 0x65, 0x63, 0x9e, 0x3e, 0xf0, 0x22, 0x7c, 0xf7, 0x12, 0x87, 0x59, 0x50, 0x8c, 0x66, + 0xf4, 0x44, 0x68, 0xea, 0xc7, 0x4f, 0xce, 0x9b, 0xb3, 0x83, 0x15, 0x5e, 0x8c, 0x66, 0xbd, 0x3a, + 
0x54, 0xb5, 0x5f, 0xad, 0x7f, 0x96, 0x60, 0x6d, 0xd9, 0x4a, 0x8c, 0x83, 0x38, 0x72, 0x92, 0x38, + 0x88, 0x23, 0x27, 0xad, 0xfa, 0x8b, 0xb9, 0xaa, 0xdf, 0x82, 0x8a, 0x7c, 0x11, 0x88, 0x28, 0xff, + 0x98, 0xef, 0x9f, 0xca, 0x17, 0x01, 0xd6, 0xb0, 0x5a, 0xb4, 0x54, 0x12, 0x56, 0x4c, 0x49, 0x78, + 0x03, 0x56, 0x4f, 0xa4, 0xef, 0xcb, 0x17, 0x93, 0xc5, 0xcc, 0xf7, 0x82, 0x33, 0x53, 0x17, 0x2e, + 0x83, 0x6c, 0x1b, 0xae, 0xb8, 0x5e, 0x84, 0xe6, 0xf4, 0x65, 0xa0, 0x44, 0x40, 0xcf, 0x23, 0xe4, + 0x5d, 0x84, 0xd9, 0xd7, 0xb0, 0x65, 0x2b, 0x25, 0x66, 0xa1, 0x7a, 0x1c, 0x84, 0xb6, 0x73, 0x36, + 0x90, 0x0e, 0x9d, 0xd9, 0x59, 0x68, 0x2b, 0xef, 0xd8, 0xf3, 0xf1, 0x25, 0x58, 0xa3, 0xa1, 0x6f, + 0xe5, 0xb1, 0xcf, 0x60, 0xcd, 0x89, 0x84, 0xad, 0xc4, 0x40, 0xc4, 0xea, 0xc8, 0x56, 0xa7, 0xf4, + 0x9c, 0xaa, 0xf3, 0x0b, 0x28, 0xae, 0xc1, 0x46, 0x6b, 0x9f, 0x7a, 0xbe, 0xeb, 0xd8, 0x91, 0xdb, + 0x69, 0xe8, 0x35, 0x2c, 0x81, 0xac, 0x0b, 0x8c, 0x80, 0xe1, 0x2c, 0x54, 0x8b, 0x94, 0x0a, 0x44, + 0xbd, 0x44, 0x82, 0x59, 0x55, 0x79, 0x33, 0x11, 0x2b, 0x7b, 0x16, 0xd2, 0x4f, 0x88, 0x12, 0xcf, + 0x00, 0x76, 0x13, 0xda, 0x5e, 0xe0, 0xf8, 0x73, 0x57, 0x3c, 0x0b, 0x71, 0x21, 0x51, 0x10, 0x77, + 0x5a, 0x94, 0x83, 0xae, 0x18, 0xfc, 0xc8, 0xc0, 0x48, 0x15, 0xe7, 0x17, 0xa8, 0xab, 0x9a, 0x6a, + 0xf0, 0x84, 0x6a, 0x7d, 0x5b, 0x80, 0xf6, 0xc5, 0xc0, 0xc3, 0x6d, 0x0b, 0x71, 0xf1, 0xe6, 0x08, + 0x63, 0x3b, 0xdd, 0xca, 0x62, 0x6e, 0x2b, 0x93, 0x4b, 0xb1, 0x94, 0xbb, 0x14, 0xd3, 0xb0, 0x28, + 0xbf, 0x39, 0x2c, 0x96, 0x16, 0x5a, 0xb9, 0xb0, 0x50, 0xeb, 0xf7, 0x05, 0xb8, 0x72, 0x21, 0xb8, + 0xdf, 0xd9, 0xa2, 0x2d, 0x68, 0xce, 0xec, 0x33, 0x71, 0x64, 0x47, 0x14, 0x32, 0x25, 0x5d, 0x35, + 0xe6, 0xa0, 0xff, 0x82, 0x7d, 0x01, 0xb4, 0xf2, 0x27, 0xea, 0x52, 0xdb, 0x92, 0x00, 0x39, 0x94, + 0xea, 0x9e, 0x9c, 0x9b, 0x0b, 0x37, 0x09, 0x90, 0x04, 0x7c, 0x3d, 0x8c, 0x4a, 0x97, 0x84, 0x91, + 0x75, 0x08, 0xf5, 0xc4, 0x40, 0xb6, 0x69, 0xfe, 0x37, 0x14, 0xb2, 0xff, 0x5e, 0x8f, 0x63, 0x11, + 0xa1, 0xed, 0xfa, 0xe7, 0xc3, 0xa7, 
0x50, 0x99, 0x46, 0x72, 0x1e, 0x9a, 0x8c, 0xbd, 0xc4, 0xd0, + 0x12, 0x6b, 0x02, 0x35, 0x83, 0xb0, 0x1d, 0xa8, 0x1e, 0x2f, 0x0e, 0x93, 0x7a, 0xc7, 0xa4, 0x0b, + 0xec, 0xbb, 0x86, 0x81, 0x39, 0x48, 0x33, 0xd8, 0x55, 0x28, 0x1f, 0x2f, 0x46, 0x03, 0xfd, 0x06, + 0xc4, 0x4c, 0x86, 0xbd, 0x5e, 0x55, 0x1b, 0x64, 0x3d, 0x80, 0x56, 0x7e, 0x1c, 0x3a, 0x25, 0x57, + 0x47, 0x51, 0x3b, 0x4b, 0xd9, 0xc5, 0xb7, 0xa4, 0xec, 0x9d, 0x6d, 0xa8, 0x99, 0x3f, 0x3b, 0xac, + 0x01, 0x95, 0xc7, 0x87, 0x93, 0xe1, 0xa3, 0xf6, 0x0a, 0xab, 0x43, 0xf9, 0x60, 0x3c, 0x79, 0xd4, + 0x2e, 0x60, 0xeb, 0x70, 0x7c, 0x38, 0x6c, 0x17, 0x77, 0x6e, 0x42, 0x2b, 0xff, 0x6f, 0x87, 0x35, + 0xa1, 0x36, 0xd9, 0x3f, 0x1c, 0xf4, 0xc6, 0xbf, 0x6a, 0xaf, 0xb0, 0x16, 0xd4, 0x47, 0x87, 0x93, + 0x61, 0xff, 0x31, 0x1f, 0xb6, 0x0b, 0x3b, 0x63, 0x68, 0xa4, 0x3f, 0x12, 0x50, 0x43, 0x6f, 0x74, + 0x38, 0x68, 0xaf, 0x30, 0x80, 0xea, 0x64, 0xd8, 0xe7, 0x43, 0xd4, 0x5b, 0x83, 0xd2, 0x64, 0x72, + 0xd0, 0x2e, 0xe2, 0xac, 0xfd, 0xfd, 0xfe, 0xc1, 0xb0, 0x5d, 0xc2, 0xe6, 0xa3, 0x87, 0x47, 0xf7, + 0x26, 0xed, 0x32, 0x5b, 0x85, 0x06, 0x1a, 0xf0, 0x8c, 0x46, 0xba, 0x3b, 0x5f, 0xc0, 0x95, 0x0b, + 0x6f, 0x77, 0x52, 0x76, 0xb0, 0xcf, 0x87, 0xa8, 0xb8, 0x09, 0xb5, 0x23, 0x3e, 0x7a, 0xb2, 0xff, + 0x68, 0xd8, 0x2e, 0xa0, 0xe0, 0xc1, 0xb8, 0x7f, 0x7f, 0x38, 0x68, 0x17, 0x7b, 0xd7, 0xbe, 0x7b, + 0xb9, 0x51, 0xf8, 0xfe, 0xe5, 0x46, 0xe1, 0x87, 0x97, 0x1b, 0x85, 0xbf, 0xbf, 0xdc, 0x28, 0x7c, + 0xfb, 0x6a, 0x63, 0xe5, 0xfb, 0x57, 0x1b, 0x2b, 0x3f, 0xbc, 0xda, 0x58, 0x39, 0xae, 0xd2, 0x8f, + 0xd7, 0xcf, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x67, 0x69, 0x19, 0xa9, 0xb8, 0x15, 0x00, 0x00, } func (m *Op) Marshal() (dAtA []byte, err error) { @@ -2955,19 +2843,10 @@ func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Ulimit) > 0 { - for iNdEx := len(m.Ulimit) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ulimit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } + if m.ShmSize != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.ShmSize)) + i-- + dAtA[i] = 0x40 } if len(m.Hostname) > 0 { i -= len(m.Hostname) @@ -3037,7 +2916,7 @@ func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HostIP) Marshal() (dAtA []byte, err error) { +func (m *Mount) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3047,101 +2926,24 @@ func (m *HostIP) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { +func (m *Mount) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintOps(dAtA, i, uint64(len(m.IP))) + if len(m.ResultID) > 0 { + i -= len(m.ResultID) + copy(dAtA[i:], m.ResultID) + i = encodeVarintOps(dAtA, i, uint64(len(m.ResultID))) i-- - dAtA[i] = 0x12 - } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) + dAtA[i] = 0x1 i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Ulimit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Ulimit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Ulimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Hard != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Hard)) - i-- - dAtA[i] = 0x18 - } - if m.Soft != 0 { 
- i = encodeVarintOps(dAtA, i, uint64(m.Soft)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintOps(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Mount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Mount) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResultID) > 0 { - i -= len(m.ResultID) - copy(dAtA[i:], m.ResultID) - i = encodeVarintOps(dAtA, i, uint64(len(m.ResultID))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + dAtA[i] = 0xba } if m.SSHOpt != nil { { @@ -3185,20 +2987,6 @@ func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0xa2 } - if m.TmpfsOpt != nil { - { - size, err := m.TmpfsOpt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } if m.MountType != 0 { i = encodeVarintOps(dAtA, i, uint64(m.MountType)) i-- @@ -3241,34 +3029,6 @@ func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TmpfsOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TmpfsOpt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TmpfsOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Size_ != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Size_)) - i-- - 
dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *CacheOpt) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4155,6 +3915,43 @@ func (m *Definition) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *HostIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.IP) > 0 { + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintOps(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *FileOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4975,47 +4772,8 @@ func (m *Meta) Size() (n int) { if l > 0 { n += 1 + l + sovOps(uint64(l)) } - if len(m.Ulimit) > 0 { - for _, e := range m.Ulimit { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *HostIP) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.IP) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *Ulimit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Soft != 0 { - n += 1 + sovOps(uint64(m.Soft)) - } - if m.Hard != 0 { - n += 1 + sovOps(uint64(m.Hard)) + if m.ShmSize != 0 { + n += 1 + sovOps(uint64(m.ShmSize)) } return n } @@ -5046,10 +4804,6 @@ func (m *Mount) Size() (n int) { if 
m.MountType != 0 { n += 1 + sovOps(uint64(m.MountType)) } - if m.TmpfsOpt != nil { - l = m.TmpfsOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } if m.CacheOpt != nil { l = m.CacheOpt.Size() n += 2 + l + sovOps(uint64(l)) @@ -5069,18 +4823,6 @@ func (m *Mount) Size() (n int) { return n } -func (m *TmpfsOpt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Size_ != 0 { - n += 1 + sovOps(uint64(m.Size_)) - } - return n -} - func (m *CacheOpt) Size() (n int) { if m == nil { return 0 @@ -5443,6 +5185,23 @@ func (m *Definition) Size() (n int) { return n } +func (m *HostIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.IP) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + func (m *FileOp) Size() (n int) { if m == nil { return 0 @@ -6677,284 +6436,20 @@ func (m *Meta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthOps } if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ProxyEnv == nil { - m.ProxyEnv = &ProxyEnv{} - } - if err := m.ProxyEnv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtraHosts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExtraHosts = append(m.ExtraHosts, &HostIP{}) - if err := m.ExtraHosts[len(m.ExtraHosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ulimit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ulimit = append(m.Ulimit, &Ulimit{}) - if err := m.Ulimit[len(m.Ulimit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostIP) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostIP: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Ulimit) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Ulimit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Ulimit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + return io.ErrUnexpectedEOF + } + if m.ProxyEnv == nil { + m.ProxyEnv = &ProxyEnv{} + } + if err := m.ProxyEnv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExtraHosts", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -6964,29 +6459,31 @@ func (m *Ulimit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthOps } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ExtraHosts = append(m.ExtraHosts, &HostIP{}) + if err := m.ExtraHosts[len(m.ExtraHosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Soft", wireType) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Hostname", wireType) } - m.Soft = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -6996,16 +6493,29 @@ func (m *Ulimit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Soft |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 3: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShmSize", wireType) } - m.Hard = 0 + m.ShmSize = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -7015,7 +6525,7 @@ func (m *Ulimit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Hard |= int64(b&0x7F) << shift + m.ShmSize |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7211,42 +6721,6 @@ func (m *Mount) Unmarshal(dAtA []byte) error { break } } - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TmpfsOpt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TmpfsOpt == nil { - m.TmpfsOpt = &TmpfsOpt{} - } - if err := m.TmpfsOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 20: if wireType != 2 { return 
fmt.Errorf("proto: wrong wireType = %d for field CacheOpt", wireType) @@ -7408,75 +6882,6 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } return nil } -func (m *TmpfsOpt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TmpfsOpt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TmpfsOpt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *CacheOpt) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -10248,6 +9653,120 @@ func (m *Definition) Unmarshal(dAtA []byte) error { } return nil } +func (m *HostIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} func (m *FileOp) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/solver/pb/ops.proto b/solver/pb/ops.proto index 83296a1ee7fd..49e6eb74650f 100644 --- a/solver/pb/ops.proto +++ b/solver/pb/ops.proto @@ -58,18 +58,7 @@ message Meta { ProxyEnv proxy_env = 5; repeated HostIP extraHosts = 6; string hostname = 7; - repeated Ulimit ulimit = 9; -} - -message HostIP { - string Host = 1; - string IP = 2; -} - -message Ulimit { - string Name = 1; - int64 Soft = 2; - int64 Hard = 3; + int64 shmSize = 8; } enum NetMode { @@ -91,7 +80,6 @@ message Mount { int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; bool readonly = 5; MountType mountType = 6; - TmpfsOpt TmpfsOpt = 19; CacheOpt cacheOpt = 20; SecretOpt secretOpt = 21; SSHOpt SSHOpt = 22; @@ -108,12 +96,6 @@ enum MountType { HOST_BIND = 100; // Earthly specific. } -// TmpfsOpt defines options describing tpmfs mounts -message TmpfsOpt { - // Specify an upper limit on the size of the filesystem. 
- int64 size = 1; -} - // CacheOpt defines options specific to cache mounts message CacheOpt { // ID is an optional namespace for the mount @@ -263,6 +245,11 @@ message Definition { Source Source = 3; } +message HostIP { + string Host = 1; + string IP = 2; +} + message FileOp { repeated FileAction actions = 2; } diff --git a/source/git/gitsource.go b/source/git/gitsource.go index 914793ffc095..9e140f9296ea 100644 --- a/source/git/gitsource.go +++ b/source/git/gitsource.go @@ -630,6 +630,7 @@ func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...strin cmd.Stderr = io.MultiWriter(stderr, errbuf) cmd.Env = []string{ "PATH=" + os.Getenv("PATH"), + "HOME=" + os.Getenv("HOME"), // earthly needs this for git to read /root/.gitconfig "GIT_TERMINAL_PROMPT=0", "GIT_SSH_COMMAND=" + getGitSSHCommand(knownHosts), // "GIT_TRACE=1", diff --git a/source/git/redact_credentials_go114.go b/source/git/redact_credentials_go114.go deleted file mode 100644 index b2aa31404279..000000000000 --- a/source/git/redact_credentials_go114.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !go1.15 - -package git - -import "net/url" - -// redactCredentials takes a URL and redacts a password from it. -// e.g. 
"https://user:password@github.com/user/private-repo-failure.git" will be changed to -// "https://user:xxxxx@github.com/user/private-repo-failure.git" -func redactCredentials(s string) string { - u, err := url.Parse(s) - if err != nil { - return s // string is not a URL, just return it - } - - return urlRedacted(u) -} - -// urlRedacted comes from go's url.Redacted() which isn't available on go < 1.15 -func urlRedacted(u *url.URL) string { - if u == nil { - return "" - } - - ru := *u - if _, has := ru.User.Password(); has { - ru.User = url.UserPassword(ru.User.Username(), "xxxxx") - } - return ru.String() -} diff --git a/source/local/local.go b/source/local/local.go index b3f532e70aad..63882364a354 100644 --- a/source/local/local.go +++ b/source/local/local.go @@ -171,12 +171,8 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, cal OverrideExcludes: false, DestDir: dest, CacheUpdater: &cacheUpdater{cc, mount.IdentityMapping()}, - - // earthly needs to set this to nil so it does not conflict with verbose output of file copies - // (warning: it's not even feasible to reassign ProgressCb to nil as the call to newProgressHandler causes it to print out immediately) - //ProgressCb: newProgressHandler(ctx, "transferring "+ls.src.Name+":"), - - Differ: ls.src.Differ, + ProgressCb: newProgressHandler(ctx, "transferring "+ls.src.Name+":"), + Differ: ls.src.Differ, } if idmap := mount.IdentityMapping(); idmap != nil { @@ -190,6 +186,11 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, cal } stat.Uid = uint32(identity.UID) stat.Gid = uint32(identity.GID) + // earthly-specific + // whatever permissions the user has, give them to group and others as well + // this matches behavior of gitsource, given that umask is 0 + umode := (stat.Mode & 0700) >> 6 + stat.Mode = (stat.Mode ^ (stat.Mode & 0777)) | umode | (umode << 3) | (umode << 6) return true } } diff --git a/util/contentutil/multiprovider.go 
b/util/contentutil/multiprovider.go index 138abc6b9461..2893899be9a9 100644 --- a/util/contentutil/multiprovider.go +++ b/util/contentutil/multiprovider.go @@ -26,26 +26,6 @@ type MultiProvider struct { sub map[digest.Digest]content.Provider } -func (mp *MultiProvider) CheckDescriptor(ctx context.Context, desc ocispecs.Descriptor) error { - type checkDescriptor interface { - CheckDescriptor(context.Context, ocispecs.Descriptor) error - } - - mp.mu.RLock() - if p, ok := mp.sub[desc.Digest]; ok { - mp.mu.RUnlock() - if cd, ok := p.(checkDescriptor); ok { - return cd.CheckDescriptor(ctx, desc) - } - } else { - mp.mu.RUnlock() - } - if cd, ok := mp.base.(checkDescriptor); ok { - return cd.CheckDescriptor(ctx, desc) - } - return nil -} - // ReaderAt returns a content.ReaderAt func (mp *MultiProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { mp.mu.RLock() diff --git a/util/entitlements/security/security_linux.go b/util/entitlements/security/security_linux.go index ccff21f541db..0f285fb2edd1 100644 --- a/util/entitlements/security/security_linux.go +++ b/util/entitlements/security/security_linux.go @@ -4,11 +4,9 @@ import ( "context" "fmt" "os" - "sync" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" - "github.com/containerd/containerd/pkg/cap" "github.com/containerd/containerd/pkg/userns" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -19,11 +17,46 @@ import ( // WithInsecureSpec sets spec with All capability. 
func WithInsecureSpec() oci.SpecOpts { return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { - addCaps, err := getAllCaps() - if err != nil { - return err + addCaps := []string{ + "CAP_FSETID", + "CAP_KILL", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_SETFCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_AUDIT_WRITE", + "CAP_MAC_ADMIN", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_SYS_PTRACE", + "CAP_SYS_MODULE", + "CAP_SYSLOG", + "CAP_SYS_RAWIO", + "CAP_SYS_ADMIN", + "CAP_LINUX_IMMUTABLE", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_PACCT", + "CAP_SYS_TTY_CONFIG", + "CAP_SYS_TIME", + "CAP_WAKE_ALARM", + "CAP_AUDIT_READ", + "CAP_AUDIT_CONTROL", + "CAP_SYS_RESOURCE", + "CAP_BLOCK_SUSPEND", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_LEASE", + "CAP_NET_ADMIN", + "CAP_NET_BROADCAST", } - s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, addCaps...) s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, addCaps...) s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, addCaps...) 
@@ -127,76 +160,3 @@ func getFreeLoopID() (int, error) { } return 0, errors.Errorf("error getting free loop device: %v", uerr) } - -var ( - currentCaps []string - currentCapsErr error - currentCapsOnce sync.Once -) - -func getCurrentCaps() ([]string, error) { - currentCapsOnce.Do(func() { - currentCaps, currentCapsErr = cap.Current() - }) - - return currentCaps, currentCapsErr -} - -func getAllCaps() ([]string, error) { - availableCaps, err := getCurrentCaps() - if err != nil { - return nil, fmt.Errorf("error getting current capabilities: %s", err) - } - - // see if any of the base linux35Caps are not available to be granted - // they are either not supported by the kernel or dropped at the process level - for _, cap := range availableCaps { - if _, exists := linux35Caps[cap]; !exists { - logrus.Warnf("capability %s could not be granted for insecure mode", cap) - } - } - - return availableCaps, nil -} - -// linux35Caps provides a list of capabilities available on Linux 3.5 kernel -var linux35Caps = map[string]struct{}{ - "CAP_FSETID": {}, - "CAP_KILL": {}, - "CAP_FOWNER": {}, - "CAP_MKNOD": {}, - "CAP_CHOWN": {}, - "CAP_DAC_OVERRIDE": {}, - "CAP_NET_RAW": {}, - "CAP_SETGID": {}, - "CAP_SETUID": {}, - "CAP_SETPCAP": {}, - "CAP_SETFCAP": {}, - "CAP_NET_BIND_SERVICE": {}, - "CAP_SYS_CHROOT": {}, - "CAP_AUDIT_WRITE": {}, - "CAP_MAC_ADMIN": {}, - "CAP_MAC_OVERRIDE": {}, - "CAP_DAC_READ_SEARCH": {}, - "CAP_SYS_PTRACE": {}, - "CAP_SYS_MODULE": {}, - "CAP_SYSLOG": {}, - "CAP_SYS_RAWIO": {}, - "CAP_SYS_ADMIN": {}, - "CAP_LINUX_IMMUTABLE": {}, - "CAP_SYS_BOOT": {}, - "CAP_SYS_NICE": {}, - "CAP_SYS_PACCT": {}, - "CAP_SYS_TTY_CONFIG": {}, - "CAP_SYS_TIME": {}, - "CAP_WAKE_ALARM": {}, - "CAP_AUDIT_READ": {}, - "CAP_AUDIT_CONTROL": {}, - "CAP_SYS_RESOURCE": {}, - "CAP_BLOCK_SUSPEND": {}, - "CAP_IPC_LOCK": {}, - "CAP_IPC_OWNER": {}, - "CAP_LEASE": {}, - "CAP_NET_ADMIN": {}, - "CAP_NET_BROADCAST": {}, -} diff --git a/util/resolver/authorizer.go b/util/resolver/authorizer.go index 
f45efb1892bc..24bbf8b31774 100644 --- a/util/resolver/authorizer.go +++ b/util/resolver/authorizer.go @@ -337,7 +337,7 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se r.expires = exp } } else if errors.Is(err, context.Canceled) { - // prevent context canceled errors from being permanent + // earthly-specific prevent context canceled errors from being permanent r.expires = time.Now() } }() diff --git a/util/resolver/limited/group.go b/util/resolver/limited/group.go index 7fdd947a02a5..6c0ba3a9eaad 100644 --- a/util/resolver/limited/group.go +++ b/util/resolver/limited/group.go @@ -11,15 +11,12 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/remotes" "github.com/docker/distribution/reference" + digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) -type contextKeyT string - -var contextKey = contextKeyT("buildkit/util/resolver/limited") - var Default = New(4) type Group struct { @@ -33,13 +30,7 @@ type req struct { ref string } -func (r *req) acquire(ctx context.Context, desc ocispecs.Descriptor) (context.Context, func(), error) { - if v := ctx.Value(contextKey); v != nil { - return ctx, func() {}, nil - } - - ctx = context.WithValue(ctx, contextKey, struct{}{}) - +func (r *req) acquire(ctx context.Context, desc ocispecs.Descriptor) (func(), error) { // json request get one additional connection highPriority := strings.HasSuffix(desc.MediaType, "+json") @@ -55,16 +46,16 @@ func (r *req) acquire(ctx context.Context, desc ocispecs.Descriptor) (context.Co r.g.mu.Unlock() if !highPriority { if err := s[0].Acquire(ctx, 1); err != nil { - return ctx, nil, err + return nil, err } } if err := s[1].Acquire(ctx, 1); err != nil { if !highPriority { s[0].Release(1) } - return ctx, nil, err + return nil, err } - return ctx, func() { + return func() { s[1].Release(1) if !highPriority { 
s[0].Release(1) @@ -87,17 +78,60 @@ func (g *Group) WrapFetcher(f remotes.Fetcher, ref string) remotes.Fetcher { return &fetcher{Fetcher: f, req: g.req(ref)} } -func (g *Group) PushHandler(pusher remotes.Pusher, provider content.Provider, ref string) images.HandlerFunc { - ph := remotes.PushHandler(pusher, provider) - req := g.req(ref) - return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { - ctx, release, err := req.acquire(ctx, desc) - if err != nil { - return nil, err +func (g *Group) WrapPusher(p remotes.Pusher, ref string) remotes.Pusher { + return &pusher{Pusher: p, req: g.req(ref)} +} + +type pusher struct { + remotes.Pusher + req *req +} + +func (p *pusher) Push(ctx context.Context, desc ocispecs.Descriptor) (content.Writer, error) { + release, err := p.req.acquire(ctx, desc) + if err != nil { + return nil, err + } + w, err := p.Pusher.Push(ctx, desc) + if err != nil { + release() + return nil, err + } + ww := &writer{Writer: w} + closer := func() { + if !ww.closed { + logrus.Warnf("writer not closed cleanly: %s", desc.Digest) } - defer release() - return ph(ctx, desc) + release() } + ww.release = closer + runtime.SetFinalizer(ww, func(rc *writer) { + rc.close() + }) + return ww, nil +} + +type writer struct { + content.Writer + once sync.Once + release func() + closed bool +} + +func (w *writer) Close() error { + w.closed = true + w.close() + return w.Writer.Close() +} + +func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + w.closed = true + w.close() + return w.Writer.Commit(ctx, size, expected, opts...) 
+} + +func (w *writer) close() { + w.once.Do(w.release) } type fetcher struct { @@ -106,7 +140,7 @@ type fetcher struct { } func (f *fetcher) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadCloser, error) { - ctx, release, err := f.req.acquire(ctx, desc) + release, err := f.req.acquire(ctx, desc) if err != nil { return nil, err } @@ -162,7 +196,7 @@ func FetchHandler(ingester content.Ingester, fetcher remotes.Fetcher, ref string } func PushHandler(pusher remotes.Pusher, provider content.Provider, ref string) images.HandlerFunc { - return Default.PushHandler(pusher, provider, ref) + return remotes.PushHandler(Default.WrapPusher(pusher, ref), provider) } func domain(ref string) string { diff --git a/vendor/github.com/dimchansky/utfbom/.gitignore b/vendor/github.com/dimchansky/utfbom/.gitignore deleted file mode 100644 index d7ec5cebb98d..000000000000 --- a/vendor/github.com/dimchansky/utfbom/.gitignore +++ /dev/null @@ -1,37 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib -*.o -*.a - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.prof - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -# Gogland -.idea/ \ No newline at end of file diff --git a/vendor/github.com/dimchansky/utfbom/.travis.yml b/vendor/github.com/dimchansky/utfbom/.travis.yml deleted file mode 100644 index 19312ee35fc0..000000000000 --- a/vendor/github.com/dimchansky/utfbom/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go -sudo: false - -go: - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - 1.14.x - - 1.15.x - -cache: - directories: - - $HOME/.cache/go-build - - $HOME/gopath/pkg/mod - -env: - global: - - GO111MODULE=on - -before_install: - - go get 
github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover - - go get golang.org/x/tools/cmd/goimports - - go get golang.org/x/lint/golint -script: - - gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false) - - golint ./... # This won't break the build, just show warnings - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/dimchansky/utfbom/LICENSE deleted file mode 100644 index 6279cb87f434..000000000000 --- a/vendor/github.com/dimchansky/utfbom/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright (c) 2018-2020, Dmitrij Koniajev (dimchansky@gmail.com) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/dimchansky/utfbom/README.md deleted file mode 100644 index 8ece280089a5..000000000000 --- a/vendor/github.com/dimchansky/utfbom/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) - -The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. It can also return the encoding detected by the BOM. 
- -## Installation - - go get -u github.com/dimchansky/utfbom - -## Example - -```go -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - - "github.com/dimchansky/utfbom" -) - -func main() { - trySkip([]byte("\xEF\xBB\xBFhello")) - trySkip([]byte("hello")) -} - -func trySkip(byteData []byte) { - fmt.Println("Input:", byteData) - - // just skip BOM - output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) - if err != nil { - fmt.Println(err) - return - } - fmt.Println("ReadAll with BOM skipping", output) - - // skip BOM and detect encoding - sr, enc := utfbom.Skip(bytes.NewReader(byteData)) - fmt.Printf("Detected encoding: %s\n", enc) - output, err = ioutil.ReadAll(sr) - if err != nil { - fmt.Println(err) - return - } - fmt.Println("ReadAll with BOM detection and skipping", output) - fmt.Println() -} -``` - -Output: - -``` -$ go run main.go -Input: [239 187 191 104 101 108 108 111] -ReadAll with BOM skipping [104 101 108 108 111] -Detected encoding: UTF8 -ReadAll with BOM detection and skipping [104 101 108 108 111] - -Input: [104 101 108 108 111] -ReadAll with BOM skipping [104 101 108 108 111] -Detected encoding: Unknown -ReadAll with BOM detection and skipping [104 101 108 108 111] -``` - - diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go deleted file mode 100644 index 77a303e564b1..000000000000 --- a/vendor/github.com/dimchansky/utfbom/utfbom.go +++ /dev/null @@ -1,192 +0,0 @@ -// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. -// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader -// interface but provides automatic BOM checking and removing as necessary. -package utfbom - -import ( - "errors" - "io" -) - -// Encoding is type alias for detected UTF encoding. -type Encoding int - -// Constants to identify detected UTF encodings. 
-const ( - // Unknown encoding, returned when no BOM was detected - Unknown Encoding = iota - - // UTF8, BOM bytes: EF BB BF - UTF8 - - // UTF-16, big-endian, BOM bytes: FE FF - UTF16BigEndian - - // UTF-16, little-endian, BOM bytes: FF FE - UTF16LittleEndian - - // UTF-32, big-endian, BOM bytes: 00 00 FE FF - UTF32BigEndian - - // UTF-32, little-endian, BOM bytes: FF FE 00 00 - UTF32LittleEndian -) - -// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface. -func (e Encoding) String() string { - switch e { - case UTF8: - return "UTF8" - case UTF16BigEndian: - return "UTF16BigEndian" - case UTF16LittleEndian: - return "UTF16LittleEndian" - case UTF32BigEndian: - return "UTF32BigEndian" - case UTF32LittleEndian: - return "UTF32LittleEndian" - default: - return "Unknown" - } -} - -const maxConsecutiveEmptyReads = 100 - -// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. -// It also returns the encoding detected by the BOM. -// If the detected encoding is not needed, you can call the SkipOnly function. -func Skip(rd io.Reader) (*Reader, Encoding) { - // Is it already a Reader? - b, ok := rd.(*Reader) - if ok { - return b, Unknown - } - - enc, left, err := detectUtf(rd) - return &Reader{ - rd: rd, - buf: left, - err: err, - }, enc -} - -// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. -func SkipOnly(rd io.Reader) *Reader { - r, _ := Skip(rd) - return r -} - -// Reader implements automatic BOM (Unicode Byte Order Mark) checking and -// removing as necessary for an io.Reader object. -type Reader struct { - rd io.Reader // reader provided by the client - buf []byte // buffered data - err error // last error -} - -// Read is an implementation of io.Reader interface. -// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. 
-func (r *Reader) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - - if r.buf == nil { - if r.err != nil { - return 0, r.readErr() - } - - return r.rd.Read(p) - } - - // copy as much as we can - n = copy(p, r.buf) - r.buf = nilIfEmpty(r.buf[n:]) - return n, nil -} - -func (r *Reader) readErr() error { - err := r.err - r.err = nil - return err -} - -var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") - -func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { - buf, err = readBOM(rd) - - if len(buf) >= 4 { - if isUTF32BigEndianBOM4(buf) { - return UTF32BigEndian, nilIfEmpty(buf[4:]), err - } - if isUTF32LittleEndianBOM4(buf) { - return UTF32LittleEndian, nilIfEmpty(buf[4:]), err - } - } - - if len(buf) > 2 && isUTF8BOM3(buf) { - return UTF8, nilIfEmpty(buf[3:]), err - } - - if (err != nil && err != io.EOF) || (len(buf) < 2) { - return Unknown, nilIfEmpty(buf), err - } - - if isUTF16BigEndianBOM2(buf) { - return UTF16BigEndian, nilIfEmpty(buf[2:]), err - } - if isUTF16LittleEndianBOM2(buf) { - return UTF16LittleEndian, nilIfEmpty(buf[2:]), err - } - - return Unknown, nilIfEmpty(buf), err -} - -func readBOM(rd io.Reader) (buf []byte, err error) { - const maxBOMSize = 4 - var bom [maxBOMSize]byte // used to read BOM - - // read as many bytes as possible - for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { - if n, err = rd.Read(bom[len(buf):]); n < 0 { - panic(errNegativeRead) - } - if n > 0 { - nEmpty = 0 - } else { - nEmpty++ - if nEmpty >= maxConsecutiveEmptyReads { - err = io.ErrNoProgress - } - } - } - return -} - -func isUTF32BigEndianBOM4(buf []byte) bool { - return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF -} - -func isUTF32LittleEndianBOM4(buf []byte) bool { - return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 -} - -func isUTF8BOM3(buf []byte) bool { - return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 
0xBF -} - -func isUTF16BigEndianBOM2(buf []byte) bool { - return buf[0] == 0xFE && buf[1] == 0xFF -} - -func isUTF16LittleEndianBOM2(buf []byte) bool { - return buf[0] == 0xFF && buf[1] == 0xFE -} - -func nilIfEmpty(buf []byte) (res []byte) { - if len(buf) > 0 { - res = buf - } - return -} diff --git a/vendor/github.com/tonistiigi/go-actions-cache/LICENSE b/vendor/github.com/tonistiigi/go-actions-cache/LICENSE deleted file mode 100644 index d79d5687d0b7..000000000000 --- a/vendor/github.com/tonistiigi/go-actions-cache/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Tõnis Tiigi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/tonistiigi/go-actions-cache/cache.go b/vendor/github.com/tonistiigi/go-actions-cache/cache.go index e860a3b7789b..c8a391602164 100644 --- a/vendor/github.com/tonistiigi/go-actions-cache/cache.go +++ b/vendor/github.com/tonistiigi/go-actions-cache/cache.go @@ -18,7 +18,6 @@ import ( "time" jwt "github.com/dgrijalva/jwt-go" - "github.com/dimchansky/utfbom" "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -80,9 +79,7 @@ func TryEnv(opt Opt) (*Cache, error) { } type Opt struct { - Client *http.Client - Timeout time.Duration - BackoffPool *BackoffPool + Client *http.Client } func New(token, url string, opt Opt) (*Cache, error) { @@ -140,13 +137,6 @@ func New(token, url string, opt Opt) (*Cache, error) { if opt.Client == nil { opt.Client = http.DefaultClient } - if opt.Timeout == 0 { - opt.Timeout = 5 * time.Minute - } - - if opt.BackoffPool == nil { - opt.BackoffPool = defaultBackoffPool - } return &Cache{ opt: opt, @@ -208,11 +198,15 @@ func (c *Cache) Load(ctx context.Context, keys ...string) (*Entry, error) { q.Set("keys", strings.Join(keys, ",")) q.Set("version", version(keys[0])) req.URL.RawQuery = q.Encode() + req = req.WithContext(ctx) Log("load cache %s", req.URL.String()) - resp, err := c.doWithRetries(ctx, req) + resp, err := c.opt.Client.Do(req) if err != nil { return nil, errors.WithStack(err) } + if err := checkResponse(resp); err != nil { + return nil, err + } var ce Entry dt, err := ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) if err != nil { @@ -243,11 +237,15 @@ func (c *Cache) reserve(ctx context.Context, key string) (int, error) { c.auth(req) c.accept(req) req.Header.Set("Content-Type", "application/json") + req = req.WithContext(ctx) Log("save cache req %s body=%s", req.URL.String(), dt) - resp, err := c.doWithRetries(ctx, req) + resp, err := c.opt.Client.Do(req) if err != nil { return 0, errors.WithStack(err) } + if err := checkResponse(resp); err != nil { + return 0, err + } dt, err = 
ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) if err != nil { @@ -277,10 +275,13 @@ func (c *Cache) commit(ctx context.Context, id int, size int64) error { c.accept(req) req.Header.Set("Content-Type", "application/json") Log("commit cache %s, size %d", req.URL.String(), size) - resp, err := c.doWithRetries(ctx, req) + resp, err := c.opt.Client.Do(req) if err != nil { return errors.Wrapf(err, "error committing cache %d", id) } + if err := checkResponse(resp); err != nil { + return err + } dt, err = ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) if err != nil { return err @@ -411,10 +412,13 @@ func (c *Cache) uploadChunk(ctx context.Context, id int, ra io.ReaderAt, off, n req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", off, off+n-1)) Log("upload cache chunk %s, range %d-%d", req.URL.String(), off, off+n-1) - resp, err := c.doWithRetries(ctx, req) + resp, err := c.opt.Client.Do(req) if err != nil { return errors.WithStack(err) } + if err := checkResponse(resp); err != nil { + return err + } dt, err := ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) if err != nil { return errors.WithStack(err) @@ -425,38 +429,6 @@ func (c *Cache) uploadChunk(ctx context.Context, id int, ra io.ReaderAt, off, n return resp.Body.Close() } -func (c *Cache) doWithRetries(ctx context.Context, req *http.Request) (*http.Response, error) { - req = req.WithContext(ctx) - var err error - max := time.Now().Add(c.opt.Timeout) - for { - if err1 := c.opt.BackoffPool.Wait(ctx, time.Until(max)); err1 != nil { - if err != nil { - return nil, errors.Wrapf(err, "%v", err1) - } - return nil, err1 - } - var resp *http.Response - resp, err = c.opt.Client.Do(req) - if err != nil { - return nil, errors.WithStack(err) - } - if err := checkResponse(resp); err != nil { - var he HTTPError - if errors.As(err, &he) { - if he.StatusCode == http.StatusTooManyRequests { - c.opt.BackoffPool.Delay() - continue - } - } - c.opt.BackoffPool.Reset() - return nil, err - } - c.opt.BackoffPool.Reset() - 
return resp, nil - } -} - func (c *Cache) auth(r *http.Request) { r.Header.Add("Authorization", "Bearer "+c.Token.Raw) } @@ -563,47 +535,29 @@ func (e GithubAPIError) Is(err error) bool { return false } -type HTTPError struct { - StatusCode int - Err error -} - -func (e HTTPError) Error() string { - return e.Err.Error() -} - -func (e HTTPError) Unwrap() error { - return e.Err -} - func checkResponse(resp *http.Response) error { if resp.StatusCode >= 200 && resp.StatusCode < 300 { return nil } - dt, err := ioutil.ReadAll(utfbom.SkipOnly(io.LimitReader(resp.Body, 32*1024))) + dt, err := ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) if err != nil { return errors.WithStack(err) } var gae GithubAPIError - if err1 := json.Unmarshal(dt, &gae); err1 != nil { - err = errors.Wrapf(err1, "failed to parse error response %d: %s", resp.StatusCode, dt) - } else if gae.Message != "" { - err = errors.WithStack(gae) - } else { - err = errors.Errorf("unknown error %s: %s", resp.Status, dt) + if err := json.Unmarshal(dt, &gae); err != nil { + return errors.Wrapf(err, "failed to parse error response %d: %s", resp.StatusCode, dt) } - - return HTTPError{ - StatusCode: resp.StatusCode, - Err: err, + if gae.Message != "" { + return errors.WithStack(gae) } + return errors.Errorf("unknown error %d: %s", resp.StatusCode, dt) } func decryptToken(enc, pass string) (string, string, error) { // openssl key derivation uses some non-standard algorithm so exec instead of using go libraries // this is only used on testing anyway - cmd := exec.Command("openssl", "enc", "-d", "-aes-256-cbc", "-a", "-A", "-salt", "-md", "sha256", "-pass", "env:GHCACHE_TOKEN_PW") - cmd.Env = append(cmd.Env, fmt.Sprintf("GHCACHE_TOKEN_PW=%s", pass)) + cmd := exec.Command("openssl", "enc", "-d", "-aes-256-cbc", "-a", "-A", "-salt", "-md", "sha256", "-pass", "env:GOCACHE_TOKEN_PW") + cmd.Env = append(cmd.Env, fmt.Sprintf("GOCACHE_TOKEN_PW=%s", pass)) cmd.Stdin = bytes.NewReader([]byte(enc)) buf := &bytes.Buffer{} 
cmd.Stdout = buf diff --git a/vendor/github.com/tonistiigi/go-actions-cache/retry.go b/vendor/github.com/tonistiigi/go-actions-cache/retry.go deleted file mode 100644 index 9487048f88e8..000000000000 --- a/vendor/github.com/tonistiigi/go-actions-cache/retry.go +++ /dev/null @@ -1,108 +0,0 @@ -package actionscache - -import ( - "context" - "sync" - "time" - - "github.com/pkg/errors" -) - -const maxBackoff = time.Second * 90 -const minBackoff = time.Second * 1 - -var defaultBackoffPool = &BackoffPool{} - -type BackoffPool struct { - mu sync.Mutex - queue []chan struct{} - timer *time.Timer - backoff time.Duration - target time.Time -} - -func (b *BackoffPool) Wait(ctx context.Context, timeout time.Duration) error { - b.mu.Lock() - if b.timer == nil { - b.mu.Unlock() - return nil - } - - done := make(chan struct{}) - b.queue = append(b.queue, done) - - b.mu.Unlock() - - select { - case <-ctx.Done(): - return ctx.Err() - case <-done: - return nil - case <-time.After(timeout): - return errors.Errorf("maximum timeout reached") - } -} - -func (b *BackoffPool) Reset() { - b.mu.Lock() - b.reset() - b.backoff = 0 - b.mu.Unlock() -} -func (b *BackoffPool) reset() { - for _, done := range b.queue { - close(done) - } - b.queue = nil - if b.timer != nil { - b.timer.Stop() - b.timer = nil - } -} - -func (b *BackoffPool) trigger(t *time.Timer) { - b.mu.Lock() - if b.timer != t { - // this timer is not the current one - b.mu.Unlock() - return - } - - b.reset() - b.backoff = b.backoff * 2 - if b.backoff > maxBackoff { - b.backoff = maxBackoff - } - b.mu.Unlock() -} - -func (b *BackoffPool) Delay() { - b.mu.Lock() - if b.timer != nil { - minTime := time.Now().Add(minBackoff) - if b.target.Before(minTime) { - b.target = minTime - b.timer.Stop() - b.setupTimer() - } - b.mu.Unlock() - return - } - - if b.backoff == 0 { - b.backoff = minBackoff - } - - b.target = time.Now().Add(b.backoff) - b.setupTimer() - - b.mu.Unlock() -} - -func (b *BackoffPool) setupTimer() { - var t *time.Timer - 
b.timer = time.AfterFunc(time.Until(b.target), func() { - b.trigger(t) - }) - t = b.timer -} diff --git a/vendor/modules.txt b/vendor/modules.txt index db6d203d7095..d44b3f7de821 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -201,9 +201,6 @@ github.com/davecgh/go-spew/spew # github.com/dgrijalva/jwt-go v3.2.0+incompatible ## explicit github.com/dgrijalva/jwt-go -# github.com/dimchansky/utfbom v1.1.1 -## explicit -github.com/dimchansky/utfbom # github.com/docker/cli v20.10.8+incompatible ## explicit github.com/docker/cli/cli/config @@ -500,7 +497,7 @@ github.com/tonistiigi/fsutil github.com/tonistiigi/fsutil/copy github.com/tonistiigi/fsutil/prefix github.com/tonistiigi/fsutil/types -# github.com/tonistiigi/go-actions-cache v0.0.0-20211002214948-4d48f2ff622a +# github.com/tonistiigi/go-actions-cache v0.0.0-20210714033416-b93d7f1b2e70 ## explicit; go 1.16 github.com/tonistiigi/go-actions-cache # github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea diff --git a/worker/base/worker.go b/worker/base/worker.go index 82c0e204447b..f80a266b4b55 100644 --- a/worker/base/worker.go +++ b/worker/base/worker.go @@ -49,7 +49,6 @@ import ( digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" ) @@ -356,24 +355,6 @@ func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, } func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (ref cache.ImmutableRef, err error) { - if cd, ok := remote.Provider.(interface { - CheckDescriptor(context.Context, ocispecs.Descriptor) error - }); ok && len(remote.Descriptors) > 0 { - var eg errgroup.Group - for _, desc := range remote.Descriptors { - desc := desc - eg.Go(func() error { - if err := cd.CheckDescriptor(ctx, desc); err != nil { - return err - } - return nil - }) - } - if err := eg.Wait(); err != nil { - return nil, err - } - } - descHandler := 
&cache.DescHandler{ Provider: func(session.Group) content.Provider { return remote.Provider }, Progress: &controller.Controller{